diff --git a/README.md b/README.md index e4708906..618aca68 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ Currently supported: - Postgres - ClickHouse - AWS Athena +- Databricks - MS-SQL - MySQL - Snowflake @@ -229,6 +230,7 @@ For some database backends some special functionality is available: * cloudsql-postgres: A special `*` character can be used to query all databases accessible by the account * cloudsql-mysql: Same as above +* databricks: Supports access token, OAuth M2M (machine-to-machine), and OAuth U2M (user-to-machine) authentication, as implemented by the [driver](https://github.com/databricks/databricks-sql-go). * rds-postgres: This type of URL expects a working AWS configuration which will use the equivalent of `rds generate-db-auth-token` for the password. For this driver, the `AWS_REGION` environment variable diff --git a/config.go b/config.go index 74debaf1..135c3839 100644 --- a/config.go +++ b/config.go @@ -165,8 +165,15 @@ type connection struct { user string tokenExpirationTime time.Time iteratorValues []string - snowflakeConfig *gosnowflake.Config - snowflakeDSN string + snowflakeConfig *gosnowflake.Config + snowflakeDSN string + // Databricks OAuth configuration + databricksOAuthType string // "m2m" or "u2m" + databricksClientID string + databricksClientSecret string + databricksHost string + databricksHTTPPath string + databricksCatalog string } // Query is an SQL query that is executed on a connection diff --git a/config.yml.dist b/config.yml.dist index 3fc50173..38c575f1 100644 --- a/config.yml.dist +++ b/config.yml.dist @@ -128,7 +128,7 @@ jobs: interval: '5m' connections: # see https://godoc.org/github.com/segmentio/go-athena#Driver.Open - 'athena://HOST_VALUE_IGNORED/?db=&region=&output_location=s3://aws-athena-query-results--' + 'athena://?db=&region=&output_location=s3://aws-athena-query-results--' queries: - name: "athena_query_rows" help: "Number of rows..." 
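For a quick sense of how the three `databricks://` URL forms documented above decompose — the same breakdown the new parsing in `job.go` performs — here is a minimal standalone sketch. The workspace host, warehouse ID, and credentials below are placeholders, not values taken from this change:

```go
// Sketch (not part of this PR): shows how the databricks:// URL forms
// split into host, HTTP path, and query parameters, mirroring the
// decomposition in updateConnections(). All concrete values are placeholders.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	examples := []string{
		// Token auth: the personal access token rides in the password slot
		"databricks://token:dapiXXXX@example.cloud.databricks.com:443/sql/1.0/warehouses/abc123?catalog=main",
		// OAuth M2M: client credentials arrive as query parameters
		"databricks://example.cloud.databricks.com/sql/1.0/warehouses/abc123?catalog=main&authType=m2m&clientId=ID&clientSecret=SECRET",
		// OAuth U2M: interactive browser flow, nothing secret in the URL
		"databricks://example.cloud.databricks.com/sql/1.0/warehouses/abc123?catalog=main&authType=u2m",
	}
	for _, raw := range examples {
		u, err := url.Parse(raw)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		q := u.Query()
		fmt.Printf("host=%s httpPath=%s catalog=%s authType=%s\n",
			u.Host, strings.TrimPrefix(u.Path, "/"), q.Get("catalog"), q.Get("authType"))
	}
}
```

The `authType` query parameter selects the OAuth flow; when it is absent, the connection falls through to plain token auth, as in the sample config below.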
@@ -193,3 +193,25 @@ jobs: query: | SELECT datname::text, usename::text, COUNT(*)::float AS count FROM pg_stat_activity GROUP BY datname, usename; +- name: "databricks" + interval: '5m' + connections: + # Connect to a Databricks SQL Warehouse or All-Purpose Cluster + # Connect with OAuth M2M + # - databricks://your-workspace.cloud.databricks.com/sql/1.0/warehouses/abc123?catalog=main&authType=m2m&clientId=YOUR_CLIENT_ID&clientSecret=YOUR_CLIENT_SECRET + # Connect with a token + - 'databricks://token:TOKEN@INSTANCE.cloud.databricks.com:443/sql/1.0/warehouses/WAREHOUSE_ID' + queries: + - name: "databricks_usage" + help: "Total Databricks usage by date" + values: + - "total" + query: | + SELECT + usage_date, + SUM(usage_quantity) AS total + FROM + system.billing.usage + WHERE + usage_date = CURRENT_DATE() + GROUP BY usage_date; diff --git a/go.mod b/go.mod index 1fe37829..b255802c 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/ClickHouse/clickhouse-go/v2 v2.34.0 github.com/aws/aws-sdk-go v1.50.6 github.com/cenkalti/backoff v2.2.1+incompatible + github.com/databricks/databricks-sql-go v1.6.1 github.com/go-kit/log v0.2.1 github.com/go-sql-driver/mysql v1.8.1 github.com/gobwas/glob v0.2.3 @@ -39,6 +40,8 @@ require ( github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/apache/arrow-go/v18 v18.0.0 // indirect + github.com/apache/arrow/go/v12 v12.0.1 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect @@ -54,14 +57,19 @@ require ( github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/coreos/go-oidc/v3 v3.5.0 // indirect github.com/danieljoos/wincred v1.2.2 // indirect + github.com/dnephin/pflag v1.0.7 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/elastic/go-sysinfo v1.11.2 // indirect github.com/elastic/go-windows v1.0.1 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -71,12 +79,16 @@ require ( github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v24.12.23+incompatible // indirect github.com/google/s2a-go v0.1.8 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // 
indirect github.com/jackc/pgconn v1.14.3 // indirect @@ -94,8 +106,13 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect + github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect + github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/paulmach/orb v0.11.1 // indirect @@ -104,6 +121,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/rs/zerolog v1.28.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -130,5 +148,6 @@ require ( google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/gotestsum v1.8.2 // indirect howett.net/plist v1.0.1 // indirect ) diff --git a/go.sum b/go.sum index 77baa08f..2b37265d 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,7 @@ cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/cloudsqlconn v1.12.1 h1:crfkYy4TsYXx+n/wELlPthDYaAmDo2olHLv9DmRuJzY= cloud.google.com/go/cloudsqlconn v1.12.1/go.mod h1:Y8x/9e+QsjJNkvOj9mdJ8/ixhE95Ab2H/vsyy0mXWNc= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= @@ -46,6 +47,8 @@ github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7X github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/apache/arrow-go/v18 v18.0.0 h1:1dBDaSbH3LtulTyOVYaBCHO3yVRwjV+TZaqn3g6V7ZM= github.com/apache/arrow-go/v18 v18.0.0/go.mod h1:t6+cWRSmKgdQ6HsxisQjok+jBpKGhRDiqcf3p0p/F+A= +github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= +github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/aws/aws-sdk-go v1.50.6 h1:FaXvNwHG3Ri1paUEW16Ahk9zLVqSAdqa1M3phjZR35Q= @@ -99,16 +102,23 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw= 
+github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= +github.com/databricks/databricks-sql-go v1.6.1 h1:SOAwVdw/N3AZ5ECJYI49SBUncNy61WzOpzlJFZ17O5g= +github.com/databricks/databricks-sql-go v1.6.1/go.mod h1:/FB8hVRN/KGnWStEyz19r2r7TmfBsK8nUv6yMid//tU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= +github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -127,14 +137,21 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod 
h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -155,6 +172,7 @@ github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -171,6 +189,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -180,6 +199,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -193,11 +213,14 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= 
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -211,6 +234,12 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -316,9 +345,17 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/microsoft/go-mssqldb v1.8.0 h1:7cyZ/AT7ycDsEoWPIXibd+aVKFtteUNhDGf3aobP+tw= @@ -369,8 +406,11 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.2.1/go.mod 
h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= @@ -387,6 +427,7 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/snowflakedb/gosnowflake v1.13.3 h1:udARwDZ+Eb7TnihuMno1CaNVUDbJnikWC+8p4RCJQBk= github.com/snowflakedb/gosnowflake v1.13.3/go.mod h1:NUxNYUdyPn9sRoYB/udq/fXBXuhLS3SBTPI2/OT79uc= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -416,6 +457,7 @@ github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3i github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= @@ -452,6 +494,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -481,6 +524,7 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -488,13 +532,17 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -504,6 +552,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= @@ -520,18 +569,28 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= @@ -541,6 +600,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= @@ -560,6 +620,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= golang.org/x/tools 
v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= @@ -577,6 +639,7 @@ google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -602,7 +665,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -621,6 +686,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/gotestsum v1.8.2 h1:szU3TaSz8wMx/uG+w/A2+4JUPwH903YYaMI9yOOYAyI= +gotest.tools/gotestsum v1.8.2/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/job.go b/job.go index 44e2bd06..8dc1e711 100644 --- a/job.go +++ b/job.go @@ -2,6 +2,11 @@ package main import ( "context" + "crypto/rsa" + "crypto/x509" + "database/sql" + "database/sql/driver" + "encoding/pem" "fmt" "net/url" "os" @@ -9,12 +14,12 @@ import ( "strconv" "strings" "time" - "crypto/rsa" - "crypto/x509" - "encoding/pem" _ "github.com/ClickHouse/clickhouse-go/v2" // register the ClickHouse driver "github.com/cenkalti/backoff" + dbsql "github.com/databricks/databricks-sql-go" // register the Databricks driver + "github.com/databricks/databricks-sql-go/auth/oauth/m2m" + "github.com/databricks/databricks-sql-go/auth/oauth/u2m" 
"github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/go-sql-driver/mysql" // register the MySQL driver @@ -285,6 +290,56 @@ func (j *Job) updateConnections() { continue } + // Handle Databricks connections + // Supports standard token auth and OAuth (M2M and U2M) + // OAuth M2M: databricks://host/httpPath?catalog=xxx&authType=m2m&clientId=xxx&clientSecret=xxx + // OAuth U2M: databricks://host/httpPath?catalog=xxx&authType=u2m + // Token auth: databricks://token:xxx@host/httpPath?catalog=xxx + if strings.HasPrefix(conn, "databricks://") { + u, err := url.Parse(conn) + if err != nil { + level.Error(j.log).Log("msg", "Failed to parse Databricks URL", "url", conn, "err", err) + continue + } + + user := "" + if u.User != nil { + user = u.User.Username() + } + + // Extract catalog/database from query params or path + queryParams := u.Query() + database := queryParams.Get("catalog") + httpPath := strings.TrimPrefix(u.Path, "/") + + // Check for OAuth authentication type + authType := queryParams.Get("authType") + clientID := queryParams.Get("clientId") + clientSecret := queryParams.Get("clientSecret") + + newConn := &connection{ + conn: nil, + url: conn, + driver: "databricks", + host: u.Host, + database: database, + user: user, + } + + // Configure OAuth if specified + if authType == "m2m" || authType == "u2m" { + newConn.databricksOAuthType = authType + newConn.databricksClientID = clientID + newConn.databricksClientSecret = clientSecret + newConn.databricksHost = u.Host + newConn.databricksHTTPPath = httpPath + newConn.databricksCatalog = database + } + + j.conns = append(j.conns, newConn) + continue + } + if strings.HasPrefix(conn, "rds-postgres://") { // Reuse Postgres driver by stripping "rds-" from connection URL after building the RDS authentication token conn = strings.TrimPrefix(conn, "rds-") @@ -386,10 +441,7 @@ func (j *Job) updateConnections() { user: user, } if newConn.driver == "athena" { - // call go-athena's Open() to ensure conn.db is set, - // otherwise API calls will complain about an empty database field: - // "InvalidParameter: 1 validation error(s) found. - minimum field size of 1, StartQueryExecutionInput.QueryExecutionContext.Database." 
- newConn.conn, err = sqlx.Open("athena", u.String()) + newConn.conn, err = sqlx.Open("athena", u.RawQuery) if err != nil { level.Error(j.log).Log("msg", "Failed to open Athena connection", "connection", conn, "err", err) continue @@ -401,10 +453,10 @@ func (j *Job) updateConnections() { level.Error(j.log).Log("msg", "Failed to parse Snowflake URL", "url", conn, "err", err) continue } - + queryParams := u.Query() privateKeyPath := os.ExpandEnv(queryParams.Get("private_key_file")) - + cfg := &gosnowflake.Config{ Account: u.Host, User: u.User.Username(), @@ -412,7 +464,7 @@ func (j *Job) updateConnections() { Database: queryParams.Get("database"), Schema: queryParams.Get("schema"), } - + if privateKeyPath != "" { // RSA key auth keyBytes, err := os.ReadFile(privateKeyPath) @@ -420,13 +472,13 @@ func (j *Job) updateConnections() { level.Error(j.log).Log("msg", "Failed to read private key file", "path", privateKeyPath, "err", err) continue } - + keyBlock, _ := pem.Decode(keyBytes) if keyBlock == nil { level.Error(j.log).Log("msg", "Failed to decode PEM block", "path", privateKeyPath) continue } - + var privateKey *rsa.PrivateKey if parsedKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes); err == nil { privateKey, _ = parsedKey.(*rsa.PrivateKey) @@ -436,16 +488,16 @@ func (j *Job) updateConnections() { level.Error(j.log).Log("msg", "Failed to parse private key", "err", err) continue } - + cfg.Authenticator = gosnowflake.AuthTypeJwt cfg.PrivateKey = privateKey - + dsn, err := gosnowflake.DSN(cfg) if err != nil { level.Error(j.log).Log("msg", "Failed to create Snowflake DSN with RSA", "err", err) continue } - + newConn.snowflakeConfig = cfg newConn.snowflakeDSN = dsn newConn.host = u.Host @@ -460,20 +512,20 @@ func (j *Job) updateConnections() { cfg.Port = port } } - + dsn, err := gosnowflake.DSN(cfg) if err != nil { level.Error(j.log).Log("msg", "Failed to create Snowflake DSN with password", "err", err) continue } - + newConn.conn, err = sqlx.Open("snowflake", dsn) if err != nil { level.Error(j.log).Log("msg", "Failed to open Snowflake connection", "err", err) continue } } - + j.conns = append(j.conns, newConn) continue } @@ -651,30 +703,85 @@ func (c *connection) connect(job *Job) error { } c.tokenExpirationTime = time.Now().Add(time.Hour) } - + db, err := sqlx.Open("snowflake", c.snowflakeDSN) if err != nil { return fmt.Errorf("failed to open Snowflake connection: %w (host: %s)", err, c.host) } - + db.SetMaxOpenConns(1) db.SetMaxIdleConns(0) db.SetConnMaxLifetime(30 * time.Minute) - + if err := db.Ping(); err != nil { db.Close() return fmt.Errorf("failed to ping Snowflake: %w (host: %s)", err, c.host) } - + c.conn = db return nil } } + // Handle Databricks OAuth connections (M2M and U2M) + if c.driver == "databricks" && c.databricksOAuthType != "" { + var connector driver.Connector + var err error + + connectorOptions := []dbsql.ConnOption{ + dbsql.WithServerHostname(c.databricksHost), + dbsql.WithHTTPPath(c.databricksHTTPPath), + } + + if c.databricksCatalog != "" { + connectorOptions = append(connectorOptions, dbsql.WithInitialNamespace(c.databricksCatalog, "")) + } + + switch c.databricksOAuthType { + case "m2m": + // Machine-to-Machine OAuth using client credentials + if c.databricksClientID == "" || c.databricksClientSecret == "" { + return fmt.Errorf("databricks M2M OAuth requires clientId and clientSecret parameters") + } + authenticator := m2m.NewAuthenticator(c.databricksClientID, c.databricksClientSecret, c.databricksHost) + connectorOptions = append(connectorOptions, 
dbsql.WithAuthenticator(authenticator)) + + case "u2m": + // User-to-Machine OAuth (interactive browser-based authentication) + authenticator, err := u2m.NewAuthenticator(c.databricksHost, 2*time.Minute) + if err != nil { + return fmt.Errorf("failed to create Databricks U2M authenticator: %w", err) + } + connectorOptions = append(connectorOptions, dbsql.WithAuthenticator(authenticator)) + } + + connector, err = dbsql.NewConnector(connectorOptions...) + if err != nil { + return fmt.Errorf("failed to create Databricks connector: %w (host: %s)", err, c.host) + } + + db := sql.OpenDB(connector) + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(job.Interval * 2) + + // Wrap with sqlx + c.conn = sqlx.NewDb(db, "databricks") + + // execute StartupSQL + for _, query := range job.StartupSQL { + level.Debug(job.log).Log("msg", "StartupSQL", "Query:", query) + c.conn.MustExec(query) + } + + return nil + } dsn := c.url switch c.driver { case "mysql": dsn = strings.TrimPrefix(dsn, "mysql://") dsn = strings.TrimPrefix(dsn, "rds-mysql://") + case "databricks": + dsn = strings.TrimPrefix(dsn, "databricks://") case "clickhouse+tcp", "clickhouse+http": // Support both http and tcp connections dsn = strings.TrimPrefix(dsn, "clickhouse+") c.driver = "clickhouse" diff --git a/vendor/github.com/apache/arrow/go/v12/LICENSE.txt b/vendor/github.com/apache/arrow/go/v12/LICENSE.txt new file mode 100644 index 00000000..9ea2e1f4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/LICENSE.txt @@ -0,0 +1,1802 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD + +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: + + Copyright (c) 2013 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's FrameOfReference project. + +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the TensorFlow project + +Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the NumPy project. + +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 + +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the FlatBuffers project + +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- + +This project includes code from the jemalloc project + +https://github.com/jemalloc/jemalloc + +Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>. +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The script r/configure has the following license (MIT) + +Copyright (c) 2017, Jeroen Ooms and Jim Hester + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). 
+ +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date). +It is licensed under the MIT license. + +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) + +Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/string_view.hpp has the following license + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) + +uriparser - RFC 3986 URI parsing library + +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + * Neither the name of the <ORGANIZATION> nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files under dev/tasks/conda-recipes have the following license + +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utf8cpp/ have the following license + +Copyright 2006 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. +Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Aurora. + +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora + +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the Google styleguide. + +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/styleguide +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from Snappy. + +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/snappy +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from the manylinux project. + +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. + +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) + +-------------------------------------------------------------------------------- + +This project includes code from the cymove project: + +* python/pyarrow/includes/common.pxd includes code from the cymove project + +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the Ursabot project under the dev/archery +directory. + +License: BSD 2-Clause + +Copyright 2019 RStudio, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from CMake. + +* cpp/cmake_modules/FindGTest.cmake is based on code from CMake. + +Copyright: Copyright 2000-2019 Kitware, Inc. and Contributors +Homepage: https://gitlab.kitware.com/cmake/cmake +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from mingw-w64. + +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 + +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. + +-------------------------------------------------------------------------------- + +This project includes code from Google's Asylo project. + +* cpp/src/arrow/result.h is based on status_or.h + +Copyright (c) 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Google's protobuf project. + +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based on ASSIGN_OR_RETURN + +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. + +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. + +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- +Google Test llvm/utils/unittest/googletest +OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} +pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} +ARM contributions llvm/lib/Target/ARM/LICENSE.TXT +md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. 
Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. + + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more information. + + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). + +-------------------------------------------------------------------------------- + +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +-------------------------------------------------------------------------------- + +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen <j@w1.fi> and contributors +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. 
+ + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+--------------------------------------------------------------------------------
+
+This project includes code from the rtools-backports project.
+
+* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code
+  from the rtools-backports project.
+
+Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms.
+All rights reserved.
+Homepage: https://github.com/r-windows/rtools-backports
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+Some code from pandas has been adapted for the pyarrow codebase. pandas is
+available under the 3-clause BSD license, which follows:
+
+pandas license
+==============
+
+Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
+All rights reserved.
+
+Copyright (c) 2008-2011 AQR Capital Management, LLC
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following
+    disclaimer in the documentation and/or other materials provided
+    with the distribution.
+
+  * Neither the name of the copyright holder nor the names of any
+    contributors may be used to endorse or promote products derived
+    from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+Some bits from DyND, in particular aspects of the build system, have been
+adapted from libdynd and dynd-python under the terms of the BSD 2-clause
+license
+
+The BSD 2-Clause License
+
+    Copyright (C) 2011-12, Dynamic NDArray Developers
+    All rights reserved.
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. + +-------------------------------------------------------------------------------- + +This project includes code from the autobrew project. + +* r/tools/autobrew and dev/tasks/homebrew-formulae/autobrew/apache-arrow.rb + are based on code from the autobrew project. + +Copyright (c) 2019, Jeroen Ooms +License: MIT +Homepage: https://github.com/jeroen/autobrew + +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+----------------------------------------------------------------------
+
+cpp/src/arrow/vendored/base64.cpp has the following license
+
+ZLIB License
+
+Copyright (C) 2004-2017 René Nyffenegger
+
+This source code is provided 'as-is', without any express or implied
+warranty. In no event will the author be held liable for any damages arising
+from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose, including
+commercial applications, and to alter it and redistribute it freely, subject to
+the following restrictions:
+
+1. The origin of this source code must not be misrepresented; you must not
+   claim that you wrote the original source code. If you use this source code
+   in a product, an acknowledgment in the product documentation would be
+   appreciated but is not required.
+
+2. Altered source versions must be plainly marked as such, and must not be
+   misrepresented as being the original source code.
+
+3. This notice may not be removed or altered from any source distribution.
+
+René Nyffenegger rene.nyffenegger@adp-gmbh.ch
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/vendored/optional.hpp has the following license
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/vendored/musl/strptime.c has the following license
+
+Copyright © 2005-2020 Rich Felker, et al.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/.editorconfig b/vendor/github.com/apache/arrow/go/v12/arrow/.editorconfig new file mode 100644 index 00000000..a7ceaf93 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/.editorconfig @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +root = true + +[*.tmpl] +indent_style = tab +indent_size = 4 \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/.gitignore b/vendor/github.com/apache/arrow/go/v12/arrow/.gitignore new file mode 100644 index 00000000..d4b831ae --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/.gitignore @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +### Go template +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +bin/ +vendor/ \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.lock b/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.lock new file mode 100644 index 00000000..143e4f93 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.lock @@ -0,0 +1,44 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:2d0dc026c4aef5e2f3a0e06a4dabe268b840d8f63190cf6894e02134a03f52c5" + name = "github.com/stretchr/testify" + packages = ["assert"] + pruneopts = "" + revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" + version = "v1.2.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/pkg/errors", + "github.com/stretchr/testify/assert", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.toml b/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.toml new file mode 100644 index 00000000..b27807d6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.toml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.0" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.1" \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Makefile b/vendor/github.com/apache/arrow/go/v12/arrow/Makefile new file mode 100644 index 00000000..9c4a2326 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/Makefile @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+GO_BUILD=go build
+GO_GEN=go generate
+GO_TEST?=go test
+GOPATH=$(realpath ../../../../../..)
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print)
+
+.PHONY: test bench assembly generate
+
+assembly:
+	@$(MAKE) -C memory assembly
+	@$(MAKE) -C math assembly
+
+generate: bin/tmpl
+	bin/tmpl -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen_test.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
+	bin/tmpl -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl
+	@$(MAKE) -C math generate
+
+fmt: $(SOURCES_NO_VENDOR)
+	goimports -w $^
+
+bench: $(GO_SOURCES) | assembly
+	$(GO_TEST) $(GO_TEST_ARGS) -bench=. -run=- ./...
+
+bench-noasm: $(GO_SOURCES)
+	$(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' -bench=. -run=- ./...
+
+test: $(GO_SOURCES) | assembly
+	$(GO_TEST) $(GO_TEST_ARGS) ./...
+
+test-noasm: $(GO_SOURCES)
+	$(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' ./...
+
+bin/tmpl: _tools/tmpl/main.go
+	$(GO_BUILD) -o $@ ./_tools/tmpl
+
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array.go b/vendor/github.com/apache/arrow/go/v12/arrow/array.go
new file mode 100644
index 00000000..ffcd20cc
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array.go
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/apache/arrow/go/v12/arrow/memory"
+)
+
+// ArrayData is the underlying memory and metadata of an Arrow array, corresponding
+// to the same-named object in the C++ implementation.
+//
+// The Array interface and subsequent typed objects provide strongly typed
+// accessors which support marshalling and other patterns to the data.
+// This interface allows direct access to the underlying raw byte buffers
+// which allows for manipulating the internal data and casting. For example,
+// one could cast the raw bytes from int64 to float64 like so:
+//
+//	arrdata := GetMyInt64Data().Data()
+//	newdata := array.NewData(arrow.PrimitiveTypes.Float64, arrdata.Len(),
+//		arrdata.Buffers(), nil, arrdata.NullN(), arrdata.Offset())
+//	defer newdata.Release()
+//	float64arr := array.NewFloat64Data(newdata)
+//	defer float64arr.Release()
+//
+// This is also useful in an analytics setting where memory may be reused. For
+// example, if we had a group of operations all returning float64 such as:
+//
+//	Log(Sqrt(Expr(arr)))
+//
+// The low-level implementations could have signatures such as:
+//
+//	func Log(values arrow.ArrayData) arrow.ArrayData
+//
+// Another example would be a function that consumes one or more memory buffers
+// in an input array and replaces them with newly-allocated data, changing the
+// output data type as well.
+type ArrayData interface {
+	// Retain increases the reference count by 1, it is safe to call
+	// in multiple goroutines simultaneously.
+	Retain()
+	// Release decreases the reference count by 1, it is safe to call
+	// in multiple goroutines simultaneously. Data is removed when reference
+	// count is 0.
+	Release()
+	// DataType returns the current datatype stored in the object.
+	DataType() DataType
+	// NullN returns the number of nulls for this data instance.
+	NullN() int
+	// Len returns the length of this data instance
+	Len() int
+	// Offset returns the offset into the raw buffers where this data begins
+	Offset() int
+	// Buffers returns the slice of raw data buffers for this data instance. Their
+	// meaning depends on the context of the data type.
+	Buffers() []*memory.Buffer
+	// Children returns the slice of children data instances, only relevant for
+	// nested data types. For instance, List data will have a single child containing
+	// elements of all the rows and Struct data will contain numfields children which
+	// are the arrays for each field of the struct.
+	Children() []ArrayData
+	// Reset allows reusing this ArrayData object by replacing the data in this ArrayData
+	// object without changing the reference count.
+	Reset(newtype DataType, newlength int, newbuffers []*memory.Buffer, newchildren []ArrayData, newnulls int, newoffset int)
+	// Dictionary returns the ArrayData object for the dictionary if this is a
+	// dictionary array, otherwise it will be nil.
+	Dictionary() ArrayData
+}
+
+// Array represents an immutable sequence of values using the Arrow in-memory format.
+type Array interface {
+	json.Marshaler
+
+	fmt.Stringer
+
+	// DataType returns the type metadata for this instance.
+	DataType() DataType
+
+	// NullN returns the number of null values in the array.
+	NullN() int
+
+	// NullBitmapBytes returns a byte slice of the validity bitmap.
+	NullBitmapBytes() []byte
+
+	// IsNull returns true if value at index is null.
+	// NOTE: IsNull will panic if NullBitmapBytes is not empty and 0 > i ≥ Len.
+	IsNull(i int) bool
+
+	// IsValid returns true if value at index is not null.
+	// NOTE: IsValid will panic if NullBitmapBytes is not empty and 0 > i ≥ Len.
+	IsValid(i int) bool
+	// ValueStr returns the value at index as a string.
+	ValueStr(i int) string
+
+	// Get single value to be marshalled with `json.Marshal`
+	GetOneForMarshal(i int) interface{}
+
+	Data() ArrayData
+
+	// Len returns the number of elements in the array.
+	Len() int
+
+	// Retain increases the reference count by 1.
+	// Retain may be called simultaneously from multiple goroutines.
+	Retain()
+
+	// Release decreases the reference count by 1.
+	// Release may be called simultaneously from multiple goroutines.
+	// When the reference count goes to zero, the memory is freed.
+	Release()
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/array.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/array.go
new file mode 100644
index 00000000..6231ae9a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/array.go
@@ -0,0 +1,179 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+)
+
+const (
+	// UnknownNullCount specifies the NullN should be calculated from the null bitmap buffer.
+	UnknownNullCount = -1
+	NullValueStr     = "(null)"
+)
+
+type array struct {
+	refCount        int64
+	data            *Data
+	nullBitmapBytes []byte
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (a *array) Retain() {
+	atomic.AddInt64(&a.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (a *array) Release() {
+	debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&a.refCount, -1) == 0 {
+		a.data.Release()
+		a.data, a.nullBitmapBytes = nil, nil
+	}
+}
+
+// DataType returns the type metadata for this instance.
+func (a *array) DataType() arrow.DataType { return a.data.dtype }
+
+// NullN returns the number of null values in the array.
+func (a *array) NullN() int {
+	if a.data.nulls < 0 {
+		a.data.nulls = a.data.length - bitutil.CountSetBits(a.nullBitmapBytes, a.data.offset, a.data.length)
+	}
+	return a.data.nulls
+}
+
+// NullBitmapBytes returns a byte slice of the validity bitmap.
+func (a *array) NullBitmapBytes() []byte { return a.nullBitmapBytes }
+
+func (a *array) Data() arrow.ArrayData { return a.data }
+
+// Len returns the number of elements in the array.
+func (a *array) Len() int { return a.data.length }
+
+// IsNull returns true if value at index is null.
+// NOTE: IsNull will panic if NullBitmapBytes is not empty and 0 > i ≥ Len.
+func (a *array) IsNull(i int) bool {
+	return len(a.nullBitmapBytes) != 0 && bitutil.BitIsNotSet(a.nullBitmapBytes, a.data.offset+i)
+}
+
+// IsValid returns true if value at index is not null.
+// NOTE: IsValid will panic if NullBitmapBytes is not empty and 0 > i ≥ Len.
+func (a *array) IsValid(i int) bool {
+	return len(a.nullBitmapBytes) == 0 || bitutil.BitIsSet(a.nullBitmapBytes, a.data.offset+i)
+}
+
+func (a *array) setData(data *Data) {
+	// Retain before releasing in case a.data is the same as data.
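+	// If they are the same object, releasing first could drop the reference
+	// count to zero and free the underlying buffers before they are re-retained.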
+ data.Retain() + + if a.data != nil { + a.data.Release() + } + + if len(data.buffers) > 0 && data.buffers[0] != nil { + a.nullBitmapBytes = data.buffers[0].Bytes() + } + a.data = data +} + +func (a *array) Offset() int { + return a.data.Offset() +} + +type arrayConstructorFn func(arrow.ArrayData) arrow.Array + +var ( + makeArrayFn [64]arrayConstructorFn +) + +func invalidDataType(data arrow.ArrayData) arrow.Array { + panic("invalid data type: " + data.DataType().ID().String()) +} + +// MakeFromData constructs a strongly-typed array instance from generic Data. +func MakeFromData(data arrow.ArrayData) arrow.Array { + return makeArrayFn[byte(data.DataType().ID()&0x3f)](data) +} + +// NewSlice constructs a zero-copy slice of the array with the indicated +// indices i and j, corresponding to array[i:j]. +// The returned array must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the input array. +// NewSlice panics if j < i. +func NewSlice(arr arrow.Array, i, j int64) arrow.Array { + data := NewSliceData(arr.Data(), i, j) + slice := MakeFromData(data) + data.Release() + return slice +} + +func init() { + makeArrayFn = [...]arrayConstructorFn{ + arrow.NULL: func(data arrow.ArrayData) arrow.Array { return NewNullData(data) }, + arrow.BOOL: func(data arrow.ArrayData) arrow.Array { return NewBooleanData(data) }, + arrow.UINT8: func(data arrow.ArrayData) arrow.Array { return NewUint8Data(data) }, + arrow.INT8: func(data arrow.ArrayData) arrow.Array { return NewInt8Data(data) }, + arrow.UINT16: func(data arrow.ArrayData) arrow.Array { return NewUint16Data(data) }, + arrow.INT16: func(data arrow.ArrayData) arrow.Array { return NewInt16Data(data) }, + arrow.UINT32: func(data arrow.ArrayData) arrow.Array { return NewUint32Data(data) }, + arrow.INT32: func(data arrow.ArrayData) arrow.Array { return NewInt32Data(data) }, + arrow.UINT64: func(data arrow.ArrayData) arrow.Array { return NewUint64Data(data) }, + arrow.INT64: func(data arrow.ArrayData) arrow.Array { return NewInt64Data(data) }, + arrow.FLOAT16: func(data arrow.ArrayData) arrow.Array { return NewFloat16Data(data) }, + arrow.FLOAT32: func(data arrow.ArrayData) arrow.Array { return NewFloat32Data(data) }, + arrow.FLOAT64: func(data arrow.ArrayData) arrow.Array { return NewFloat64Data(data) }, + arrow.STRING: func(data arrow.ArrayData) arrow.Array { return NewStringData(data) }, + arrow.BINARY: func(data arrow.ArrayData) arrow.Array { return NewBinaryData(data) }, + arrow.FIXED_SIZE_BINARY: func(data arrow.ArrayData) arrow.Array { return NewFixedSizeBinaryData(data) }, + arrow.DATE32: func(data arrow.ArrayData) arrow.Array { return NewDate32Data(data) }, + arrow.DATE64: func(data arrow.ArrayData) arrow.Array { return NewDate64Data(data) }, + arrow.TIMESTAMP: func(data arrow.ArrayData) arrow.Array { return NewTimestampData(data) }, + arrow.TIME32: func(data arrow.ArrayData) arrow.Array { return NewTime32Data(data) }, + arrow.TIME64: func(data arrow.ArrayData) arrow.Array { return NewTime64Data(data) }, + arrow.INTERVAL_MONTHS: func(data arrow.ArrayData) arrow.Array { return NewMonthIntervalData(data) }, + arrow.INTERVAL_DAY_TIME: func(data arrow.ArrayData) arrow.Array { return NewDayTimeIntervalData(data) }, + arrow.DECIMAL128: func(data arrow.ArrayData) arrow.Array { return NewDecimal128Data(data) }, + arrow.DECIMAL256: func(data arrow.ArrayData) arrow.Array { return NewDecimal256Data(data) }, + arrow.LIST: func(data arrow.ArrayData) arrow.Array { return NewListData(data) }, + arrow.STRUCT: func(data 
arrow.ArrayData) arrow.Array { return NewStructData(data) }, + arrow.SPARSE_UNION: func(data arrow.ArrayData) arrow.Array { return NewSparseUnionData(data) }, + arrow.DENSE_UNION: func(data arrow.ArrayData) arrow.Array { return NewDenseUnionData(data) }, + arrow.DICTIONARY: func(data arrow.ArrayData) arrow.Array { return NewDictionaryData(data) }, + arrow.MAP: func(data arrow.ArrayData) arrow.Array { return NewMapData(data) }, + arrow.EXTENSION: func(data arrow.ArrayData) arrow.Array { return NewExtensionData(data) }, + arrow.FIXED_SIZE_LIST: func(data arrow.ArrayData) arrow.Array { return NewFixedSizeListData(data) }, + arrow.DURATION: func(data arrow.ArrayData) arrow.Array { return NewDurationData(data) }, + arrow.LARGE_STRING: func(data arrow.ArrayData) arrow.Array { return NewLargeStringData(data) }, + arrow.LARGE_BINARY: func(data arrow.ArrayData) arrow.Array { return NewLargeBinaryData(data) }, + arrow.LARGE_LIST: func(data arrow.ArrayData) arrow.Array { return NewLargeListData(data) }, + arrow.INTERVAL_MONTH_DAY_NANO: func(data arrow.ArrayData) arrow.Array { return NewMonthDayNanoIntervalData(data) }, + arrow.RUN_END_ENCODED: func(data arrow.ArrayData) arrow.Array { return NewRunEndEncodedData(data) }, + + // invalid data types to fill out array to size 2^6 - 1 + 63: invalidDataType, + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go new file mode 100644 index 00000000..0cc8e216 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go @@ -0,0 +1,323 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "encoding/base64" + "fmt" + "strings" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/goccy/go-json" +) + +type BinaryLike interface { + arrow.Array + ValueBytes() []byte + ValueOffset64(int) int64 +} + +// A type which represents an immutable sequence of variable-length binary strings. +type Binary struct { + array + valueOffsets []int32 + valueBytes []byte +} + +// NewBinaryData constructs a new Binary array from data. +func NewBinaryData(data arrow.ArrayData) *Binary { + a := &Binary{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Value returns the slice at index i. This value should not be mutated. 
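+// Value panics if i is outside the range [0, a.Len()).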
+func (a *Binary) Value(i int) []byte {
+	if i < 0 || i >= a.array.data.length {
+		panic("arrow/array: index out of range")
+	}
+	idx := a.array.data.offset + i
+	return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]]
+}
+
+// ValueStr returns the value at index i as a base64-encoded string, or
+// NullValueStr if the value is null.
+func (a *Binary) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	}
+	return base64.StdEncoding.EncodeToString(a.Value(i))
+}
+
+// ValueString returns the string at index i without performing additional allocations.
+// The string is only valid for the lifetime of the Binary array.
+func (a *Binary) ValueString(i int) string {
+	b := a.Value(i)
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+func (a *Binary) ValueOffset(i int) int {
+	if i < 0 || i >= a.array.data.length {
+		panic("arrow/array: index out of range")
+	}
+	return int(a.valueOffsets[a.array.data.offset+i])
+}
+
+func (a *Binary) ValueOffset64(i int) int64 {
+	return int64(a.ValueOffset(i))
+}
+
+func (a *Binary) ValueLen(i int) int {
+	if i < 0 || i >= a.array.data.length {
+		panic("arrow/array: index out of range")
+	}
+	beg := a.array.data.offset + i
+	return int(a.valueOffsets[beg+1] - a.valueOffsets[beg])
+}
+
+func (a *Binary) ValueOffsets() []int32 {
+	beg := a.array.data.offset
+	end := beg + a.array.data.length + 1
+	return a.valueOffsets[beg:end]
+}
+
+func (a *Binary) ValueBytes() []byte {
+	beg := a.array.data.offset
+	end := beg + a.array.data.length
+	return a.valueBytes[a.valueOffsets[beg]:a.valueOffsets[end]]
+}
+
+func (a *Binary) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i := 0; i < a.Len(); i++ {
+		if i > 0 {
+			o.WriteString(" ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString("(null)")
+		default:
+			fmt.Fprintf(o, "%q", a.ValueString(i))
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+func (a *Binary) setData(data *Data) {
+	if len(data.buffers) != 3 {
+		panic("len(data.buffers) != 3")
+	}
+
+	a.array.setData(data)
+
+	if valueData := data.buffers[2]; valueData != nil {
+		a.valueBytes = valueData.Bytes()
+	}
+
+	if valueOffsets := data.buffers[1]; valueOffsets != nil {
+		a.valueOffsets = arrow.Int32Traits.CastFromBytes(valueOffsets.Bytes())
+	}
+
+	if a.array.data.length < 1 {
+		return
+	}
+
+	expNumOffsets := a.array.data.offset + a.array.data.length + 1
+	if len(a.valueOffsets) < expNumOffsets {
+		panic(fmt.Errorf("arrow/array: binary offset buffer must have at least %d values", expNumOffsets))
+	}
+
+	if int(a.valueOffsets[expNumOffsets-1]) > len(a.valueBytes) {
+		panic("arrow/array: binary offsets out of bounds of data buffer")
+	}
+}
+
+func (a *Binary) GetOneForMarshal(i int) interface{} {
+	if a.IsNull(i) {
+		return nil
+	}
+	return a.Value(i)
+}
+
+func (a *Binary) MarshalJSON() ([]byte, error) {
+	vals := make([]interface{}, a.Len())
+	for i := 0; i < a.Len(); i++ {
+		vals[i] = a.GetOneForMarshal(i)
+	}
+	// golang marshal standard says that []byte will be marshalled
+	// as a base64-encoded string
+	return json.Marshal(vals)
+}
+
+func arrayEqualBinary(left, right *Binary) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !bytes.Equal(left.Value(i), right.Value(i)) {
+			return false
+		}
+	}
+	return true
+}
+
+type LargeBinary struct {
+	array
+	valueOffsets []int64
+	valueBytes   []byte
+}
+
+func NewLargeBinaryData(data arrow.ArrayData) *LargeBinary {
+	a := &LargeBinary{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+func (a *LargeBinary) Value(i int) []byte {
+	if i < 0 || i >= a.array.data.length {
+		panic("arrow/array: 
index out of range") + } + idx := a.array.data.offset + i + return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]] +} + +func (a *LargeBinary) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} +func (a *LargeBinary) ValueString(i int) string { + b := a.Value(i) + return *(*string)(unsafe.Pointer(&b)) +} + +func (a *LargeBinary) ValueOffset(i int) int64 { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + return a.valueOffsets[a.array.data.offset+i] +} + +func (a *LargeBinary) ValueOffset64(i int) int64 { + return a.ValueOffset(i) +} + +func (a *LargeBinary) ValueLen(i int) int { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + beg := a.array.data.offset + i + return int(a.valueOffsets[beg+1] - a.valueOffsets[beg]) +} + +func (a *LargeBinary) ValueOffsets() []int64 { + beg := a.array.data.offset + end := beg + a.array.data.length + 1 + return a.valueOffsets[beg:end] +} + +func (a *LargeBinary) ValueBytes() []byte { + beg := a.array.data.offset + end := beg + a.array.data.length + return a.valueBytes[a.valueOffsets[beg]:a.valueOffsets[end]] +} + +func (a *LargeBinary) String() string { + var o strings.Builder + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(&o, "%q", a.ValueString(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *LargeBinary) setData(data *Data) { + if len(data.buffers) != 3 { + panic("len(data.buffers) != 3") + } + + a.array.setData(data) + + if valueData := data.buffers[2]; valueData != nil { + a.valueBytes = valueData.Bytes() + } + + if valueOffsets := data.buffers[1]; valueOffsets != nil { + a.valueOffsets = arrow.Int64Traits.CastFromBytes(valueOffsets.Bytes()) + } + + if a.array.data.length < 1 { + return + } + + expNumOffsets := a.array.data.offset + a.array.data.length + 1 + if len(a.valueOffsets) < expNumOffsets { + panic(fmt.Errorf("arrow/array: large binary offset buffer must have at least %d values", expNumOffsets)) + } + + if int(a.valueOffsets[expNumOffsets-1]) > len(a.valueBytes) { + panic("arrow/array: large binary offsets out of bounds of data buffer") + } +} + +func (a *LargeBinary) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.Value(i) +} + +func (a *LargeBinary) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + // golang marshal standard says that []byte will be marshalled + // as a base64-encoded string + return json.Marshal(vals) +} + +func arrayEqualLargeBinary(left, right *LargeBinary) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !bytes.Equal(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +var ( + _ arrow.Array = (*Binary)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go new file mode 100644 index 00000000..401587e0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go @@ -0,0 +1,364 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "encoding/base64" + "fmt" + "math" + "reflect" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// A BinaryBuilder is used to build a Binary array using the Append methods. +type BinaryBuilder struct { + builder + + dtype arrow.BinaryDataType + offsets bufBuilder + values *byteBufferBuilder + + appendOffsetVal func(int) + getOffsetVal func(int) int + maxCapacity uint64 + offsetByteWidth int +} + +// NewBinaryBuilder can be used for any of the variable length binary types, +// Binary, LargeBinary, String, LargeString by passing the appropriate data type +func NewBinaryBuilder(mem memory.Allocator, dtype arrow.BinaryDataType) *BinaryBuilder { + var ( + offsets bufBuilder + offsetValFn func(int) + maxCapacity uint64 + offsetByteWidth int + getOffsetVal func(int) int + ) + switch dtype.Layout().Buffers[1].ByteWidth { + case 4: + b := newInt32BufferBuilder(mem) + offsetValFn = func(v int) { b.AppendValue(int32(v)) } + getOffsetVal = func(i int) int { return int(b.Value(i)) } + offsets = b + maxCapacity = math.MaxInt32 + offsetByteWidth = arrow.Int32SizeBytes + case 8: + b := newInt64BufferBuilder(mem) + offsetValFn = func(v int) { b.AppendValue(int64(v)) } + getOffsetVal = func(i int) int { return int(b.Value(i)) } + offsets = b + maxCapacity = math.MaxInt64 + offsetByteWidth = arrow.Int64SizeBytes + } + + b := &BinaryBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + offsets: offsets, + values: newByteBufferBuilder(mem), + appendOffsetVal: offsetValFn, + maxCapacity: maxCapacity, + offsetByteWidth: offsetByteWidth, + getOffsetVal: getOffsetVal, + } + return b +} + +func (b *BinaryBuilder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *BinaryBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.offsets != nil { + b.offsets.Release() + b.offsets = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *BinaryBuilder) Append(v []byte) { + b.Reserve(1) + b.appendNextOffset() + b.values.Append(v) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *BinaryBuilder) AppendString(v string) { + b.Append([]byte(v)) +} + +func (b *BinaryBuilder) AppendNull() { + b.Reserve(1) + b.appendNextOffset() + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *BinaryBuilder) AppendEmptyValue() { + b.Reserve(1) + b.appendNextOffset() + b.UnsafeAppendBoolToBitmap(true) +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *BinaryBuilder) AppendValues(v [][]byte, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + b.appendNextOffset() + b.values.Append(vv) + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +// AppendStringValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *BinaryBuilder) AppendStringValues(v []string, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + b.appendNextOffset() + b.values.Append([]byte(vv)) + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *BinaryBuilder) UnsafeAppend(v []byte) { + b.appendNextOffset() + b.values.unsafeAppend(v) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *BinaryBuilder) Value(i int) []byte { + start := b.getOffsetVal(i) + var end int + if i == (b.length - 1) { + end = b.values.Len() + } else { + end = b.getOffsetVal(i + 1) + } + return b.values.Bytes()[start:end] +} + +func (b *BinaryBuilder) init(capacity int) { + b.builder.init(capacity) + b.offsets.resize((capacity + 1) * b.offsetByteWidth) +} + +// DataLen returns the number of bytes in the data array. +func (b *BinaryBuilder) DataLen() int { return b.values.length } + +// DataCap returns the total number of bytes that can be stored +// without allocating additional memory. +func (b *BinaryBuilder) DataCap() int { return b.values.capacity } + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *BinaryBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// ReserveData ensures there is enough space for appending n bytes +// by checking the capacity and resizing the data buffer if necessary. +func (b *BinaryBuilder) ReserveData(n int) { + if b.values.capacity < b.values.length+n { + b.values.resize(b.values.Len() + n) + } +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may be reduced. +func (b *BinaryBuilder) Resize(n int) { + b.offsets.resize((n + 1) * b.offsetByteWidth) + if (n * b.offsetByteWidth) < b.offsets.Len() { + b.offsets.SetLength(n * b.offsetByteWidth) + } + b.builder.resize(n, b.init) +} + +func (b *BinaryBuilder) ResizeData(n int) { + b.values.length = n +} + +// NewArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder +// so it can be used to build a new array. +// +// Builds the appropriate Binary or LargeBinary array based on the datatype +// it was initialized with. +func (b *BinaryBuilder) NewArray() arrow.Array { + if b.offsetByteWidth == arrow.Int32SizeBytes { + return b.NewBinaryArray() + } + return b.NewLargeBinaryArray() +} + +// NewBinaryArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder +// so it can be used to build a new array. 
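+//
+// A minimal usage sketch (illustrative only, not part of the vendored source):
+//
+//	bldr := NewBinaryBuilder(memory.NewGoAllocator(), arrow.BinaryTypes.Binary)
+//	defer bldr.Release()
+//	bldr.Append([]byte("hello"))
+//	bldr.AppendNull()
+//	arr := bldr.NewBinaryArray() // arr holds its own reference
+//	defer arr.Release()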
+func (b *BinaryBuilder) NewBinaryArray() (a *Binary) { + if b.offsetByteWidth != arrow.Int32SizeBytes { + panic("arrow/array: invalid call to NewBinaryArray when building a LargeBinary array") + } + + data := b.newData() + a = NewBinaryData(data) + data.Release() + return +} + +func (b *BinaryBuilder) NewLargeBinaryArray() (a *LargeBinary) { + if b.offsetByteWidth != arrow.Int64SizeBytes { + panic("arrow/array: invalid call to NewLargeBinaryArray when building a Binary array") + } + + data := b.newData() + a = NewLargeBinaryData(data) + data.Release() + return +} + +func (b *BinaryBuilder) newData() (data *Data) { + b.appendNextOffset() + offsets, values := b.offsets.Finish(), b.values.Finish() + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, offsets, values}, nil, b.nulls, 0) + if offsets != nil { + offsets.Release() + } + + if values != nil { + values.Release() + } + + b.builder.reset() + + return +} + +func (b *BinaryBuilder) appendNextOffset() { + numBytes := b.values.Len() + debug.Assert(uint64(numBytes) <= b.maxCapacity, "exceeded maximum capacity of binary array") + b.appendOffsetVal(numBytes) +} + +func (b *BinaryBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + switch b.dtype.ID() { + case arrow.BINARY, arrow.LARGE_BINARY: + decodedVal, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("could not decode base64 string: %w", err) + } + b.Append(decodedVal) + case arrow.STRING, arrow.LARGE_STRING: + b.Append([]byte(s)) + default: + return fmt.Errorf("cannot append string to type %s", b.dtype) + } + return nil +} + +func (b *BinaryBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case string: + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err + } + b.Append(data) + case []byte: + b.Append(v) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf([]byte{}), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *BinaryBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *BinaryBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ Builder = (*BinaryBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go new file mode 100644 index 00000000..0f386bea --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/goccy/go-json"
+)
+
+// A type which represents an immutable sequence of boolean values.
+type Boolean struct {
+	array
+	values []byte
+}
+
+// NewBoolean creates a boolean array from the data memory.Buffer and contains length elements.
+// The nullBitmap buffer can be nil if there are no null values.
+// If nulls is not known, use UnknownNullCount to calculate the value of NullN at runtime from the nullBitmap buffer.
+func NewBoolean(length int, data *memory.Buffer, nullBitmap *memory.Buffer, nulls int) *Boolean {
+	arrdata := NewData(arrow.FixedWidthTypes.Boolean, length, []*memory.Buffer{nullBitmap, data}, nil, nulls, 0)
+	defer arrdata.Release()
+	return NewBooleanData(arrdata)
+}
+
+func NewBooleanData(data arrow.ArrayData) *Boolean {
+	a := &Boolean{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+func (a *Boolean) Value(i int) bool {
+	if i < 0 || i >= a.array.data.length {
+		panic("arrow/array: index out of range")
+	}
+	return bitutil.BitIsSet(a.values, a.array.data.offset+i)
+}
+
+func (a *Boolean) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	} else {
+		return strconv.FormatBool(a.Value(i))
+	}
+}
+
+func (a *Boolean) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i := 0; i < a.Len(); i++ {
+		if i > 0 {
+			fmt.Fprintf(o, " ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString("(null)")
+		default:
+			fmt.Fprintf(o, "%v", a.Value(i))
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+func (a *Boolean) setData(data *Data) {
+	a.array.setData(data)
+	vals := data.buffers[1]
+	if vals != nil {
+		a.values = vals.Bytes()
+	}
+}
+
+func (a *Boolean) GetOneForMarshal(i int) interface{} {
+	if a.IsValid(i) {
+		return a.Value(i)
+	}
+	return nil
+}
+
+func (a *Boolean) MarshalJSON() ([]byte, error) {
+	vals := make([]interface{}, a.Len())
+	for i := 0; i < a.Len(); i++ {
+		if a.IsValid(i) {
+			vals[i] = a.Value(i)
+		} else {
+			vals[i] = nil
+		}
+	}
+	return json.Marshal(vals)
+}
+
+func arrayEqualBoolean(left, right *Boolean) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+	_ arrow.Array = (*Boolean)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go
new file mode 100644
index 00000000..75c53a4f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go
@@ -0,0 +1,247 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/goccy/go-json"
+)
+
+type BooleanBuilder struct {
+	builder
+
+	data    *memory.Buffer
+	rawData []byte
+}
+
+func NewBooleanBuilder(mem memory.Allocator) *BooleanBuilder {
+	return &BooleanBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *BooleanBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.Boolean }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (b *BooleanBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *BooleanBuilder) Append(v bool) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *BooleanBuilder) AppendByte(v byte) {
+	b.Reserve(1)
+	b.UnsafeAppend(v != 0)
+}
+
+func (b *BooleanBuilder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *BooleanBuilder) AppendEmptyValue() {
+	b.Reserve(1)
+	b.UnsafeAppend(false)
+}
+
+func (b *BooleanBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	val, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	b.Append(val)
+	return nil
+}
+
+func (b *BooleanBuilder) UnsafeAppend(v bool) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	if v {
+		bitutil.SetBit(b.rawData, b.length)
+	} else {
+		bitutil.ClearBit(b.rawData, b.length)
+	}
+	b.length++
+}
+
+func (b *BooleanBuilder) AppendValues(v []bool, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	for i, vv := range v {
+		bitutil.SetBitTo(b.rawData, b.length+i, vv)
+	}
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *BooleanBuilder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.BooleanTraits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = b.data.Bytes()
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *BooleanBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
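+//
+// For example (illustrative only, not part of the vendored source):
+//
+//	b := NewBooleanBuilder(memory.NewGoAllocator())
+//	b.Resize(64) // reserve capacity for 64 values up front
+//	for i := 0; i < 64; i++ {
+//		b.UnsafeAppend(i%2 == 0)
+//	}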
+func (b *BooleanBuilder) Resize(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.init) + b.data.Resize(arrow.BooleanTraits.BytesRequired(n)) + b.rawData = b.data.Bytes() + } +} + +// NewArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder +// so it can be used to build a new array. +func (b *BooleanBuilder) NewArray() arrow.Array { + return b.NewBooleanArray() +} + +// NewBooleanArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder +// so it can be used to build a new array. +func (b *BooleanBuilder) NewBooleanArray() (a *Boolean) { + data := b.newData() + a = NewBooleanData(data) + data.Release() + return +} + +func (b *BooleanBuilder) newData() *Data { + bytesRequired := arrow.BooleanTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + res := NewData(arrow.FixedWidthTypes.Boolean, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return res +} + +func (b *BooleanBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case bool: + b.Append(v) + case string: + val, err := strconv.ParseBool(v) + if err != nil { + return err + } + b.Append(val) + case json.Number: + val, err := strconv.ParseBool(v.String()) + if err != nil { + return err + } + b.Append(val) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(true), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *BooleanBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *BooleanBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("boolean builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ Builder = (*BooleanBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go new file mode 100644 index 00000000..50e5a264 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package array + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +type bufBuilder interface { + Retain() + Release() + Len() int + Cap() int + Bytes() []byte + resize(int) + Advance(int) + SetLength(int) + Append([]byte) + Reset() + Finish() *memory.Buffer +} + +// A bufferBuilder provides common functionality for populating memory with a sequence of type-specific values. +// Specialized implementations provide type-safe APIs for appending and accessing the memory. +type bufferBuilder struct { + refCount int64 + mem memory.Allocator + buffer *memory.Buffer + length int + capacity int + + bytes []byte +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (b *bufferBuilder) Retain() { + atomic.AddInt64(&b.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *bufferBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.buffer != nil { + b.buffer.Release() + b.buffer, b.bytes = nil, nil + } + } +} + +// Len returns the length of the memory buffer in bytes. +func (b *bufferBuilder) Len() int { return b.length } + +// Cap returns the total number of bytes that can be stored without allocating additional memory. +func (b *bufferBuilder) Cap() int { return b.capacity } + +// Bytes returns a slice of length b.Len(). +// The slice is only valid for use until the next buffer modification. That is, until the next call +// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next +// buffer modification. +func (b *bufferBuilder) Bytes() []byte { return b.bytes[:b.length] } + +func (b *bufferBuilder) resize(elements int) { + if b.buffer == nil { + b.buffer = memory.NewResizableBuffer(b.mem) + } + + b.buffer.ResizeNoShrink(elements) + oldCapacity := b.capacity + b.capacity = b.buffer.Cap() + b.bytes = b.buffer.Buf() + + if b.capacity > oldCapacity { + memory.Set(b.bytes[oldCapacity:], 0) + } +} + +func (b *bufferBuilder) SetLength(length int) { + if length > b.length { + b.Advance(length) + return + } + + b.length = length +} + +// Advance increases the buffer by length and initializes the skipped bytes to zero. +func (b *bufferBuilder) Advance(length int) { + if b.capacity < b.length+length { + newCapacity := bitutil.NextPowerOf2(b.length + length) + b.resize(newCapacity) + } + b.length += length +} + +// Append appends the contents of v to the buffer, resizing it if necessary. +func (b *bufferBuilder) Append(v []byte) { + if b.capacity < b.length+len(v) { + newCapacity := bitutil.NextPowerOf2(b.length + len(v)) + b.resize(newCapacity) + } + b.unsafeAppend(v) +} + +// Reset returns the buffer to an empty state. Reset releases the memory and sets the length and capacity to zero. 
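+//
+// Editorial note: a short sketch (not in the upstream sources) of the growth
+// rule used by Advance and Append above: the requested capacity is the next
+// power of two of the required byte length (the allocator may round it up
+// further), so repeated appends amortize to O(1) per byte:
+//
+//	// length 0: Append(5 bytes) -> resize(NextPowerOf2(5)), i.e. request 8
+//	// length 5: Append(4 bytes) -> resize(NextPowerOf2(9)), i.e. request 16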
+func (b *bufferBuilder) Reset() { + if b.buffer != nil { + b.buffer.Release() + } + b.buffer, b.bytes = nil, nil + b.capacity, b.length = 0, 0 +} + +// Finish TODO(sgc) +func (b *bufferBuilder) Finish() (buffer *memory.Buffer) { + if b.length > 0 { + b.buffer.ResizeNoShrink(b.length) + } + buffer = b.buffer + b.buffer = nil + b.Reset() + if buffer == nil { + buffer = memory.NewBufferBytes(nil) + } + return +} + +func (b *bufferBuilder) unsafeAppend(data []byte) { + copy(b.bytes[b.length:], data) + b.length += len(data) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go new file mode 100644 index 00000000..7e30639a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import "github.com/apache/arrow/go/v12/arrow/memory" + +type byteBufferBuilder struct { + bufferBuilder +} + +func newByteBufferBuilder(mem memory.Allocator) *byteBufferBuilder { + return &byteBufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}} +} + +func (b *byteBufferBuilder) Values() []byte { return b.Bytes() } +func (b *byteBufferBuilder) Value(i int) byte { return b.bytes[i] } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go new file mode 100644 index 00000000..1425d0b8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go @@ -0,0 +1,124 @@ +// Code generated by array/bufferbuilder_numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package array
+
+import (
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+)
+
+type int64BufferBuilder struct {
+	bufferBuilder
+}
+
+func newInt64BufferBuilder(mem memory.Allocator) *int64BufferBuilder {
+	return &int64BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *int64BufferBuilder) AppendValues(v []int64) { b.Append(arrow.Int64Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int64BufferBuilder) Values() []int64 { return arrow.Int64Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int64 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int64BufferBuilder) Value(i int) int64 { return b.Values()[i] }
+
+// Len returns the number of int64 elements in the buffer.
+func (b *int64BufferBuilder) Len() int { return b.length / arrow.Int64SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int64BufferBuilder) AppendValue(v int64) {
+	if b.capacity < b.length+arrow.Int64SizeBytes {
+		newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int64SizeBytes)
+		b.resize(newCapacity)
+	}
+	arrow.Int64Traits.PutValue(b.bytes[b.length:], v)
+	b.length += arrow.Int64SizeBytes
+}
+
+type int32BufferBuilder struct {
+	bufferBuilder
+}
+
+func newInt32BufferBuilder(mem memory.Allocator) *int32BufferBuilder {
+	return &int32BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *int32BufferBuilder) AppendValues(v []int32) { b.Append(arrow.Int32Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int32BufferBuilder) Values() []int32 { return arrow.Int32Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int32 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int32BufferBuilder) Value(i int) int32 { return b.Values()[i] }
+
+// Len returns the number of int32 elements in the buffer.
+func (b *int32BufferBuilder) Len() int { return b.length / arrow.Int32SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int32BufferBuilder) AppendValue(v int32) {
+	if b.capacity < b.length+arrow.Int32SizeBytes {
+		newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int32SizeBytes)
+		b.resize(newCapacity)
+	}
+	arrow.Int32Traits.PutValue(b.bytes[b.length:], v)
+	b.length += arrow.Int32SizeBytes
+}
+
+type int8BufferBuilder struct {
+	bufferBuilder
+}
+
+func newInt8BufferBuilder(mem memory.Allocator) *int8BufferBuilder {
+	return &int8BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
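+//
+// Editorial note: a one-line sketch (not from the upstream sources) of what
+// the call below does; the typed slice is reinterpreted as raw bytes and
+// copied with a single Append:
+//
+//	b.Append(arrow.Int8Traits.CastToBytes([]int8{1, 2, 3})) // appends 3 bytes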
+func (b *int8BufferBuilder) AppendValues(v []int8) { b.Append(arrow.Int8Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int8BufferBuilder) Values() []int8 { return arrow.Int8Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int8 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int8BufferBuilder) Value(i int) int8 { return b.Values()[i] }
+
+// Len returns the number of int8 elements in the buffer.
+func (b *int8BufferBuilder) Len() int { return b.length / arrow.Int8SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int8BufferBuilder) AppendValue(v int8) {
+	if b.capacity < b.length+arrow.Int8SizeBytes {
+		newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int8SizeBytes)
+		b.resize(newCapacity)
+	}
+	arrow.Int8Traits.PutValue(b.bytes[b.length:], v)
+	b.length += arrow.Int8SizeBytes
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl
new file mode 100644
index 00000000..ccda145e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+)
+
+{{range .In}}
+{{$TypeNamePrefix := .name}}
+{{if .Opt.BufferBuilder}}
+type {{$TypeNamePrefix}}BufferBuilder struct {
+	bufferBuilder
+}
+
+func new{{.Name}}BufferBuilder(mem memory.Allocator) *{{$TypeNamePrefix}}BufferBuilder {
+	return &{{$TypeNamePrefix}}BufferBuilder{bufferBuilder:bufferBuilder{refCount: 1, mem:mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValues(v []{{.Type}}) { b.Append(arrow.{{.Name}}Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *{{$TypeNamePrefix}}BufferBuilder) Values() []{{.Type}} { return arrow.{{.Name}}Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the {{.Type}} element at the index i. Value will panic if i is negative or ≥ Len.
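+{{/*
+Editorial note, not part of the upstream template: for the int32 entry of the
+type list, the Value accessor below expands to exactly the accessor seen in
+the generated bufferbuilder_numeric.gen.go above:
+
+	func (b *int32BufferBuilder) Value(i int) int32 { return b.Values()[i] }
+*/}}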
+func (b *{{$TypeNamePrefix}}BufferBuilder) Value(i int) {{.Type}} { return b.Values()[i] }
+
+// Len returns the number of {{.Type}} elements in the buffer.
+func (b *{{$TypeNamePrefix}}BufferBuilder) Len() int { return b.length/arrow.{{.Name}}SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValue(v {{.Type}}) {
+	if b.capacity < b.length+arrow.{{.Name}}SizeBytes {
+		newCapacity := bitutil.NextPowerOf2(b.length + arrow.{{.Name}}SizeBytes)
+		b.resize(newCapacity)
+	}
+	arrow.{{.Name}}Traits.PutValue(b.bytes[b.length:], v)
+	b.length+=arrow.{{.Name}}SizeBytes
+}
+{{end}}
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go
new file mode 100644
index 00000000..1805d973
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go
@@ -0,0 +1,340 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/goccy/go-json"
+)
+
+const (
+	minBuilderCapacity = 1 << 5
+)
+
+// Builder provides an interface to build arrow arrays.
+type Builder interface {
+	// you can unmarshal a json array to add the values to a builder
+	json.Unmarshaler
+
+	// Type returns the datatype that this is building
+	Type() arrow.DataType
+
+	// Retain increases the reference count by 1.
+	// Retain may be called simultaneously from multiple goroutines.
+	Retain()
+
+	// Release decreases the reference count by 1.
+	Release()
+
+	// Len returns the number of elements in the array builder.
+	Len() int
+
+	// Cap returns the total number of elements that can be stored
+	// without allocating additional memory.
+	Cap() int
+
+	// NullN returns the number of null values in the array builder.
+	NullN() int
+
+	// AppendNull adds a new null value to the array being built.
+	AppendNull()
+
+	// AppendEmptyValue adds a new zero value of the appropriate type
+	AppendEmptyValue()
+
+	// AppendValueFromString adds a new value from a string. Inverse of array.ValueStr(i int) string
+	AppendValueFromString(string) error
+
+	// Reserve ensures there is enough space for appending n elements
+	// by checking the capacity and calling Resize if necessary.
+	Reserve(n int)
+
+	// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+	// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
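+	//
+	// Editorial note: a minimal usage sketch (not from the upstream sources)
+	// of the generic Builder interface; the concrete type assertion is an
+	// assumption matching what NewBuilder returns for an INT64 type:
+	//
+	//	bldr := array.NewBuilder(memory.NewGoAllocator(), arrow.PrimitiveTypes.Int64)
+	//	defer bldr.Release()
+	//	bldr.(*array.Int64Builder).AppendValues([]int64{1, 2, 3}, nil)
+	//	arr := bldr.NewArray() // resets bldr for reuse
+	//	defer arr.Release()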
+ Resize(n int) + + // NewArray creates a new array from the memory buffers used + // by the builder and resets the Builder so it can be used to build + // a new array. + NewArray() arrow.Array + + UnsafeAppendBoolToBitmap(bool) + + init(capacity int) + resize(newBits int, init func(int)) + + UnmarshalOne(*json.Decoder) error + Unmarshal(*json.Decoder) error + + newData() *Data +} + +// builder provides common functionality for managing the validity bitmap (nulls) when building arrays. +type builder struct { + refCount int64 + mem memory.Allocator + nullBitmap *memory.Buffer + nulls int + length int + capacity int +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (b *builder) Retain() { + atomic.AddInt64(&b.refCount, 1) +} + +// Len returns the number of elements in the array builder. +func (b *builder) Len() int { return b.length } + +// Cap returns the total number of elements that can be stored without allocating additional memory. +func (b *builder) Cap() int { return b.capacity } + +// NullN returns the number of null values in the array builder. +func (b *builder) NullN() int { return b.nulls } + +func (b *builder) init(capacity int) { + toAlloc := bitutil.CeilByte(capacity) / 8 + b.nullBitmap = memory.NewResizableBuffer(b.mem) + b.nullBitmap.Resize(toAlloc) + b.capacity = capacity + memory.Set(b.nullBitmap.Buf(), 0) +} + +func (b *builder) reset() { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + + b.nulls = 0 + b.length = 0 + b.capacity = 0 +} + +func (b *builder) resize(newBits int, init func(int)) { + if b.nullBitmap == nil { + init(newBits) + return + } + + newBytesN := bitutil.CeilByte(newBits) / 8 + oldBytesN := b.nullBitmap.Len() + b.nullBitmap.Resize(newBytesN) + b.capacity = newBits + if oldBytesN < newBytesN { + // TODO(sgc): necessary? + memory.Set(b.nullBitmap.Buf()[oldBytesN:], 0) + } + if newBits < b.length { + b.length = newBits + b.nulls = newBits - bitutil.CountSetBits(b.nullBitmap.Buf(), 0, newBits) + } +} + +func (b *builder) reserve(elements int, resize func(int)) { + if b.nullBitmap == nil { + b.nullBitmap = memory.NewResizableBuffer(b.mem) + } + if b.length+elements > b.capacity { + newCap := bitutil.NextPowerOf2(b.length + elements) + resize(newCap) + } +} + +// unsafeAppendBoolsToBitmap appends the contents of valid to the validity bitmap. +// As an optimization, if the valid slice is empty, the next length bits will be set to valid (not null). +func (b *builder) unsafeAppendBoolsToBitmap(valid []bool, length int) { + if len(valid) == 0 { + b.unsafeSetValid(length) + return + } + + byteOffset := b.length / 8 + bitOffset := byte(b.length % 8) + nullBitmap := b.nullBitmap.Bytes() + bitSet := nullBitmap[byteOffset] + + for _, v := range valid { + if bitOffset == 8 { + bitOffset = 0 + nullBitmap[byteOffset] = bitSet + byteOffset++ + bitSet = nullBitmap[byteOffset] + } + + if v { + bitSet |= bitutil.BitMask[bitOffset] + } else { + bitSet &= bitutil.FlippedBitMask[bitOffset] + b.nulls++ + } + bitOffset++ + } + + if bitOffset != 0 { + nullBitmap[byteOffset] = bitSet + } + b.length += len(valid) +} + +// unsafeSetValid sets the next length bits to valid in the validity bitmap. 
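+//
+// Editorial note: a worked example (not in the upstream sources) of the three
+// phases in the implementation below, for b.length = 5 and length = 20:
+//
+//	pad:      bits 5..7 are set one at a time (padToByte = 3)
+//	fast:     bytes 1..2 are memset to 0xff, covering bits 8..23 (fastLength = 2)
+//	trailing: bit 24 is set one at a time; the new length is 25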
+func (b *builder) unsafeSetValid(length int) { + padToByte := min(8-(b.length%8), length) + if padToByte == 8 { + padToByte = 0 + } + bits := b.nullBitmap.Bytes() + for i := b.length; i < b.length+padToByte; i++ { + bitutil.SetBit(bits, i) + } + + start := (b.length + padToByte) / 8 + fastLength := (length - padToByte) / 8 + memory.Set(bits[start:start+fastLength], 0xff) + + newLength := b.length + length + // trailing bytes + for i := b.length + padToByte + (fastLength * 8); i < newLength; i++ { + bitutil.SetBit(bits, i) + } + + b.length = newLength +} + +func (b *builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func NewBuilder(mem memory.Allocator, dtype arrow.DataType) Builder { + // FIXME(sbinet): use a type switch on dtype instead? + switch dtype.ID() { + case arrow.NULL: + return NewNullBuilder(mem) + case arrow.BOOL: + return NewBooleanBuilder(mem) + case arrow.UINT8: + return NewUint8Builder(mem) + case arrow.INT8: + return NewInt8Builder(mem) + case arrow.UINT16: + return NewUint16Builder(mem) + case arrow.INT16: + return NewInt16Builder(mem) + case arrow.UINT32: + return NewUint32Builder(mem) + case arrow.INT32: + return NewInt32Builder(mem) + case arrow.UINT64: + return NewUint64Builder(mem) + case arrow.INT64: + return NewInt64Builder(mem) + case arrow.FLOAT16: + return NewFloat16Builder(mem) + case arrow.FLOAT32: + return NewFloat32Builder(mem) + case arrow.FLOAT64: + return NewFloat64Builder(mem) + case arrow.STRING: + return NewStringBuilder(mem) + case arrow.LARGE_STRING: + return NewLargeStringBuilder(mem) + case arrow.BINARY: + return NewBinaryBuilder(mem, arrow.BinaryTypes.Binary) + case arrow.LARGE_BINARY: + return NewBinaryBuilder(mem, arrow.BinaryTypes.LargeBinary) + case arrow.FIXED_SIZE_BINARY: + typ := dtype.(*arrow.FixedSizeBinaryType) + return NewFixedSizeBinaryBuilder(mem, typ) + case arrow.DATE32: + return NewDate32Builder(mem) + case arrow.DATE64: + return NewDate64Builder(mem) + case arrow.TIMESTAMP: + typ := dtype.(*arrow.TimestampType) + return NewTimestampBuilder(mem, typ) + case arrow.TIME32: + typ := dtype.(*arrow.Time32Type) + return NewTime32Builder(mem, typ) + case arrow.TIME64: + typ := dtype.(*arrow.Time64Type) + return NewTime64Builder(mem, typ) + case arrow.INTERVAL_MONTHS: + return NewMonthIntervalBuilder(mem) + case arrow.INTERVAL_DAY_TIME: + return NewDayTimeIntervalBuilder(mem) + case arrow.INTERVAL_MONTH_DAY_NANO: + return NewMonthDayNanoIntervalBuilder(mem) + case arrow.DECIMAL128: + if typ, ok := dtype.(*arrow.Decimal128Type); ok { + return NewDecimal128Builder(mem, typ) + } + case arrow.DECIMAL256: + if typ, ok := dtype.(*arrow.Decimal256Type); ok { + return NewDecimal256Builder(mem, typ) + } + case arrow.LIST: + typ := dtype.(*arrow.ListType) + return NewListBuilderWithField(mem, typ.ElemField()) + case arrow.STRUCT: + typ := dtype.(*arrow.StructType) + return NewStructBuilder(mem, typ) + case arrow.SPARSE_UNION: + typ := dtype.(*arrow.SparseUnionType) + return NewSparseUnionBuilder(mem, typ) + case arrow.DENSE_UNION: + typ := dtype.(*arrow.DenseUnionType) + return NewDenseUnionBuilder(mem, typ) + case arrow.DICTIONARY: + typ := dtype.(*arrow.DictionaryType) + return NewDictionaryBuilder(mem, typ) + case arrow.LARGE_LIST: + typ := dtype.(*arrow.LargeListType) + return NewLargeListBuilderWithField(mem, typ.ElemField()) + case arrow.MAP: + typ := dtype.(*arrow.MapType) + return NewMapBuilderWithType(mem, typ) + case 
arrow.EXTENSION:
+		typ := dtype.(arrow.ExtensionType)
+		bldr := NewExtensionBuilder(mem, typ)
+		if custom, ok := typ.(ExtensionBuilderWrapper); ok {
+			return custom.NewBuilder(bldr)
+		}
+		return bldr
+	case arrow.FIXED_SIZE_LIST:
+		typ := dtype.(*arrow.FixedSizeListType)
+		return NewFixedSizeListBuilder(mem, typ.Len(), typ.Elem())
+	case arrow.DURATION:
+		typ := dtype.(*arrow.DurationType)
+		return NewDurationBuilder(mem, typ)
+	case arrow.RUN_END_ENCODED:
+		typ := dtype.(*arrow.RunEndEncodedType)
+		return NewRunEndEncodedBuilder(mem, typ.RunEnds(), typ.Encoded())
+	}
+	panic(fmt.Errorf("arrow/array: unsupported builder for %T", dtype))
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go
new file mode 100644
index 00000000..75b9993b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go
@@ -0,0 +1,734 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/float16"
+)
+
+// RecordEqual reports whether the two provided records are equal.
+func RecordEqual(left, right arrow.Record) bool {
+	switch {
+	case left.NumCols() != right.NumCols():
+		return false
+	case left.NumRows() != right.NumRows():
+		return false
+	}
+
+	for i := range left.Columns() {
+		lc := left.Column(i)
+		rc := right.Column(i)
+		if !ArrayEqual(lc, rc) {
+			return false
+		}
+	}
+	return true
+}
+
+// RecordApproxEqual reports whether the two provided records are approximately equal.
+// For non-floating point columns, it is equivalent to RecordEqual.
+func RecordApproxEqual(left, right arrow.Record, opts ...EqualOption) bool {
+	switch {
+	case left.NumCols() != right.NumCols():
+		return false
+	case left.NumRows() != right.NumRows():
+		return false
+	}
+
+	opt := newEqualOption(opts...)
+
+	for i := range left.Columns() {
+		lc := left.Column(i)
+		rc := right.Column(i)
+		if !arrayApproxEqual(lc, rc, opt) {
+			return false
+		}
+	}
+	return true
+}
+
+// helper function to evaluate a function on two chunked objects having possibly different
+// chunk layouts. The function passed in will be called for each corresponding slice of the
+// two chunked arrays, and if the function returns false it will end the loop early.
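+//
+// Editorial sketch (not in the upstream sources): for left chunk lengths
+// [3, 5] and right chunk lengths [4, 4], fn is invoked three times, over the
+// aligned runs covering global ranges [0,3), [3,4) and [4,8); each run is the
+// minimum remaining length of the two current chunks.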
+func chunkedBinaryApply(left, right *arrow.Chunked, fn func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool) {
+	var (
+		pos               int64
+		length            int64 = int64(left.Len())
+		leftIdx, rightIdx int
+		leftPos, rightPos int64
+	)
+
+	for pos < length {
+		var cleft, cright arrow.Array
+		for {
+			cleft, cright = left.Chunk(leftIdx), right.Chunk(rightIdx)
+			if leftPos == int64(cleft.Len()) {
+				leftPos = 0
+				leftIdx++
+				continue
+			}
+			if rightPos == int64(cright.Len()) {
+				rightPos = 0
+				rightIdx++
+				continue
+			}
+			break
+		}
+
+		sz := int64(min(cleft.Len()-int(leftPos), cright.Len()-int(rightPos)))
+		pos += sz
+		if !fn(cleft, leftPos, leftPos+sz, cright, rightPos, rightPos+sz) {
+			return
+		}
+
+		leftPos += sz
+		rightPos += sz
+	}
+}
+
+// ChunkedEqual reports whether two chunked arrays are equal regardless of their chunkings
+func ChunkedEqual(left, right *arrow.Chunked) bool {
+	switch {
+	case left == right:
+		return true
+	case left.Len() != right.Len():
+		return false
+	case left.NullN() != right.NullN():
+		return false
+	case !arrow.TypeEqual(left.DataType(), right.DataType()):
+		return false
+	}
+
+	var isequal bool = true
+	chunkedBinaryApply(left, right, func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool {
+		isequal = SliceEqual(left, lbeg, lend, right, rbeg, rend)
+		return isequal
+	})
+
+	return isequal
+}
+
+// ChunkedApproxEqual reports whether two chunked arrays are approximately equal regardless of their chunkings.
+// For non-floating point arrays, this is equivalent to ChunkedEqual.
+func ChunkedApproxEqual(left, right *arrow.Chunked, opts ...EqualOption) bool {
+	switch {
+	case left == right:
+		return true
+	case left.Len() != right.Len():
+		return false
+	case left.NullN() != right.NullN():
+		return false
+	case !arrow.TypeEqual(left.DataType(), right.DataType()):
+		return false
+	}
+
+	var isequal bool
+	chunkedBinaryApply(left, right, func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool {
+		isequal = SliceApproxEqual(left, lbeg, lend, right, rbeg, rend, opts...)
+		return isequal
+	})
+
+	return isequal
+}
+
+// TableEqual reports whether the two tables have the same data in the same schema.
+func TableEqual(left, right arrow.Table) bool {
+	switch {
+	case left.NumCols() != right.NumCols():
+		return false
+	case left.NumRows() != right.NumRows():
+		return false
+	}
+
+	for i := 0; int64(i) < left.NumCols(); i++ {
+		lc := left.Column(i)
+		rc := right.Column(i)
+		if !lc.Field().Equal(rc.Field()) {
+			return false
+		}
+
+		if !ChunkedEqual(lc.Data(), rc.Data()) {
+			return false
+		}
+	}
+	return true
+}
+
+// TableApproxEqual reports whether the two tables have approximately equal data in the same schema.
+func TableApproxEqual(left, right arrow.Table, opts ...EqualOption) bool {
+	switch {
+	case left.NumCols() != right.NumCols():
+		return false
+	case left.NumRows() != right.NumRows():
+		return false
+	}
+
+	for i := 0; int64(i) < left.NumCols(); i++ {
+		lc := left.Column(i)
+		rc := right.Column(i)
+		if !lc.Field().Equal(rc.Field()) {
+			return false
+		}
+
+		if !ChunkedApproxEqual(lc.Data(), rc.Data(), opts...) {
+			return false
+		}
+	}
+	return true
+}
+
+// ArrayEqual reports whether the two provided arrays are equal.
+//
+// Deprecated: This currently just delegates to calling Equal. This will be
+// removed in v9 so please update any calling code to just call array.Equal
+// directly instead.
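+//
+// Editorial note: a minimal migration sketch (not from the upstream sources);
+// a and b are hypothetical arrays:
+//
+//	same := array.Equal(a, b) // preferred over the deprecated array.ArrayEqual(a, b)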
+func ArrayEqual(left, right arrow.Array) bool { + return Equal(left, right) +} + +// Equal reports whether the two provided arrays are equal. +func Equal(left, right arrow.Array) bool { + switch { + case !baseArrayEqual(left, right): + return false + case left.Len() == 0: + return true + case left.NullN() == left.Len(): + return true + } + + // at this point, we know both arrays have same type, same length, same number of nulls + // and nulls at the same place. + // compare the values. + + switch l := left.(type) { + case *Null: + return true + case *Boolean: + r := right.(*Boolean) + return arrayEqualBoolean(l, r) + case *FixedSizeBinary: + r := right.(*FixedSizeBinary) + return arrayEqualFixedSizeBinary(l, r) + case *Binary: + r := right.(*Binary) + return arrayEqualBinary(l, r) + case *String: + r := right.(*String) + return arrayEqualString(l, r) + case *LargeBinary: + r := right.(*LargeBinary) + return arrayEqualLargeBinary(l, r) + case *LargeString: + r := right.(*LargeString) + return arrayEqualLargeString(l, r) + case *Int8: + r := right.(*Int8) + return arrayEqualInt8(l, r) + case *Int16: + r := right.(*Int16) + return arrayEqualInt16(l, r) + case *Int32: + r := right.(*Int32) + return arrayEqualInt32(l, r) + case *Int64: + r := right.(*Int64) + return arrayEqualInt64(l, r) + case *Uint8: + r := right.(*Uint8) + return arrayEqualUint8(l, r) + case *Uint16: + r := right.(*Uint16) + return arrayEqualUint16(l, r) + case *Uint32: + r := right.(*Uint32) + return arrayEqualUint32(l, r) + case *Uint64: + r := right.(*Uint64) + return arrayEqualUint64(l, r) + case *Float16: + r := right.(*Float16) + return arrayEqualFloat16(l, r) + case *Float32: + r := right.(*Float32) + return arrayEqualFloat32(l, r) + case *Float64: + r := right.(*Float64) + return arrayEqualFloat64(l, r) + case *Decimal128: + r := right.(*Decimal128) + return arrayEqualDecimal128(l, r) + case *Decimal256: + r := right.(*Decimal256) + return arrayEqualDecimal256(l, r) + case *Date32: + r := right.(*Date32) + return arrayEqualDate32(l, r) + case *Date64: + r := right.(*Date64) + return arrayEqualDate64(l, r) + case *Time32: + r := right.(*Time32) + return arrayEqualTime32(l, r) + case *Time64: + r := right.(*Time64) + return arrayEqualTime64(l, r) + case *Timestamp: + r := right.(*Timestamp) + return arrayEqualTimestamp(l, r) + case *List: + r := right.(*List) + return arrayEqualList(l, r) + case *LargeList: + r := right.(*LargeList) + return arrayEqualLargeList(l, r) + case *FixedSizeList: + r := right.(*FixedSizeList) + return arrayEqualFixedSizeList(l, r) + case *Struct: + r := right.(*Struct) + return arrayEqualStruct(l, r) + case *MonthInterval: + r := right.(*MonthInterval) + return arrayEqualMonthInterval(l, r) + case *DayTimeInterval: + r := right.(*DayTimeInterval) + return arrayEqualDayTimeInterval(l, r) + case *MonthDayNanoInterval: + r := right.(*MonthDayNanoInterval) + return arrayEqualMonthDayNanoInterval(l, r) + case *Duration: + r := right.(*Duration) + return arrayEqualDuration(l, r) + case *Map: + r := right.(*Map) + return arrayEqualMap(l, r) + case ExtensionArray: + r := right.(ExtensionArray) + return arrayEqualExtension(l, r) + case *Dictionary: + r := right.(*Dictionary) + return arrayEqualDict(l, r) + case *SparseUnion: + r := right.(*SparseUnion) + return arraySparseUnionEqual(l, r) + case *DenseUnion: + r := right.(*DenseUnion) + return arrayDenseUnionEqual(l, r) + case *RunEndEncoded: + r := right.(*RunEndEncoded) + return arrayRunEndEncodedEqual(l, r) + default: + 
panic(fmt.Errorf("arrow/array: unknown array type %T", l)) + } +} + +// ArraySliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal. +// +// Deprecated: Renamed to just array.SliceEqual, this currently will just delegate to the renamed +// function and will be removed in v9. Please update any calling code. +func ArraySliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool { + return SliceEqual(left, lbeg, lend, right, rbeg, rend) +} + +// SliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal. +func SliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool { + l := NewSlice(left, lbeg, lend) + defer l.Release() + r := NewSlice(right, rbeg, rend) + defer r.Release() + + return Equal(l, r) +} + +// ArraySliceApproxEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are approximately equal. +// +// Deprecated: renamed to just SliceApproxEqual and will be removed in v9. Please update +// calling code to just call array.SliceApproxEqual. +func ArraySliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opts ...EqualOption) bool { + return SliceApproxEqual(left, lbeg, lend, right, rbeg, rend, opts...) +} + +// SliceApproxEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are approximately equal. +func SliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opts ...EqualOption) bool { + opt := newEqualOption(opts...) + return sliceApproxEqual(left, lbeg, lend, right, rbeg, rend, opt) +} + +func sliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opt equalOption) bool { + l := NewSlice(left, lbeg, lend) + defer l.Release() + r := NewSlice(right, rbeg, rend) + defer r.Release() + + return arrayApproxEqual(l, r, opt) +} + +const defaultAbsoluteTolerance = 1e-5 + +type equalOption struct { + atol float64 // absolute tolerance + nansEq bool // whether NaNs are considered equal. +} + +func (eq equalOption) f16(f1, f2 float16.Num) bool { + v1 := float64(f1.Float32()) + v2 := float64(f2.Float32()) + switch { + case eq.nansEq: + return math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return math.Abs(v1-v2) <= eq.atol + } +} + +func (eq equalOption) f32(f1, f2 float32) bool { + v1 := float64(f1) + v2 := float64(f2) + switch { + case eq.nansEq: + return v1 == v2 || math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return v1 == v2 || math.Abs(v1-v2) <= eq.atol + } +} + +func (eq equalOption) f64(v1, v2 float64) bool { + switch { + case eq.nansEq: + return v1 == v2 || math.Abs(v1-v2) <= eq.atol || (math.IsNaN(v1) && math.IsNaN(v2)) + default: + return v1 == v2 || math.Abs(v1-v2) <= eq.atol + } +} + +func newEqualOption(opts ...EqualOption) equalOption { + eq := equalOption{ + atol: defaultAbsoluteTolerance, + nansEq: false, + } + for _, opt := range opts { + opt(&eq) + } + + return eq +} + +// EqualOption is a functional option type used to configure how Records and Arrays are compared. +type EqualOption func(*equalOption) + +// WithNaNsEqual configures the comparison functions so that NaNs are considered equal. +func WithNaNsEqual(v bool) EqualOption { + return func(o *equalOption) { + o.nansEq = v + } +} + +// WithAbsTolerance configures the comparison functions so that 2 floating point values +// v1 and v2 are considered equal if |v1-v2| <= atol. 
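+//
+// Editorial note: a short sketch (not in the upstream sources) showing that
+// the functional options compose; a and b are hypothetical float arrays:
+//
+//	array.ApproxEqual(a, b, array.WithAbsTolerance(1e-9), array.WithNaNsEqual(true))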
+func WithAbsTolerance(atol float64) EqualOption { + return func(o *equalOption) { + o.atol = atol + } +} + +// ArrayApproxEqual reports whether the two provided arrays are approximately equal. +// For non-floating point arrays, it is equivalent to ArrayEqual. +// +// Deprecated: renamed to just ApproxEqual, this alias will be removed in v9. Please update +// calling code to just call array.ApproxEqual +func ArrayApproxEqual(left, right arrow.Array, opts ...EqualOption) bool { + return ApproxEqual(left, right, opts...) +} + +// ApproxEqual reports whether the two provided arrays are approximately equal. +// For non-floating point arrays, it is equivalent to ArrayEqual. +func ApproxEqual(left, right arrow.Array, opts ...EqualOption) bool { + opt := newEqualOption(opts...) + return arrayApproxEqual(left, right, opt) +} + +func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool { + switch { + case !baseArrayEqual(left, right): + return false + case left.Len() == 0: + return true + case left.NullN() == left.Len(): + return true + } + + // at this point, we know both arrays have same type, same length, same number of nulls + // and nulls at the same place. + // compare the values. + + switch l := left.(type) { + case *Null: + return true + case *Boolean: + r := right.(*Boolean) + return arrayEqualBoolean(l, r) + case *FixedSizeBinary: + r := right.(*FixedSizeBinary) + return arrayEqualFixedSizeBinary(l, r) + case *Binary: + r := right.(*Binary) + return arrayEqualBinary(l, r) + case *String: + r := right.(*String) + return arrayEqualString(l, r) + case *LargeBinary: + r := right.(*LargeBinary) + return arrayEqualLargeBinary(l, r) + case *LargeString: + r := right.(*LargeString) + return arrayEqualLargeString(l, r) + case *Int8: + r := right.(*Int8) + return arrayEqualInt8(l, r) + case *Int16: + r := right.(*Int16) + return arrayEqualInt16(l, r) + case *Int32: + r := right.(*Int32) + return arrayEqualInt32(l, r) + case *Int64: + r := right.(*Int64) + return arrayEqualInt64(l, r) + case *Uint8: + r := right.(*Uint8) + return arrayEqualUint8(l, r) + case *Uint16: + r := right.(*Uint16) + return arrayEqualUint16(l, r) + case *Uint32: + r := right.(*Uint32) + return arrayEqualUint32(l, r) + case *Uint64: + r := right.(*Uint64) + return arrayEqualUint64(l, r) + case *Float16: + r := right.(*Float16) + return arrayApproxEqualFloat16(l, r, opt) + case *Float32: + r := right.(*Float32) + return arrayApproxEqualFloat32(l, r, opt) + case *Float64: + r := right.(*Float64) + return arrayApproxEqualFloat64(l, r, opt) + case *Decimal128: + r := right.(*Decimal128) + return arrayEqualDecimal128(l, r) + case *Decimal256: + r := right.(*Decimal256) + return arrayEqualDecimal256(l, r) + case *Date32: + r := right.(*Date32) + return arrayEqualDate32(l, r) + case *Date64: + r := right.(*Date64) + return arrayEqualDate64(l, r) + case *Time32: + r := right.(*Time32) + return arrayEqualTime32(l, r) + case *Time64: + r := right.(*Time64) + return arrayEqualTime64(l, r) + case *Timestamp: + r := right.(*Timestamp) + return arrayEqualTimestamp(l, r) + case *List: + r := right.(*List) + return arrayApproxEqualList(l, r, opt) + case *LargeList: + r := right.(*LargeList) + return arrayApproxEqualLargeList(l, r, opt) + case *FixedSizeList: + r := right.(*FixedSizeList) + return arrayApproxEqualFixedSizeList(l, r, opt) + case *Struct: + r := right.(*Struct) + return arrayApproxEqualStruct(l, r, opt) + case *MonthInterval: + r := right.(*MonthInterval) + return arrayEqualMonthInterval(l, r) + case 
*DayTimeInterval: + r := right.(*DayTimeInterval) + return arrayEqualDayTimeInterval(l, r) + case *MonthDayNanoInterval: + r := right.(*MonthDayNanoInterval) + return arrayEqualMonthDayNanoInterval(l, r) + case *Duration: + r := right.(*Duration) + return arrayEqualDuration(l, r) + case *Map: + r := right.(*Map) + return arrayApproxEqualList(l.List, r.List, opt) + case *Dictionary: + r := right.(*Dictionary) + return arrayApproxEqualDict(l, r, opt) + case ExtensionArray: + r := right.(ExtensionArray) + return arrayApproxEqualExtension(l, r, opt) + case *SparseUnion: + r := right.(*SparseUnion) + return arraySparseUnionApproxEqual(l, r, opt) + case *DenseUnion: + r := right.(*DenseUnion) + return arrayDenseUnionApproxEqual(l, r, opt) + case *RunEndEncoded: + r := right.(*RunEndEncoded) + return arrayRunEndEncodedApproxEqual(l, r, opt) + default: + panic(fmt.Errorf("arrow/array: unknown array type %T", l)) + } +} + +func baseArrayEqual(left, right arrow.Array) bool { + switch { + case left.Len() != right.Len(): + return false + case left.NullN() != right.NullN(): + return false + case !arrow.TypeEqual(left.DataType(), right.DataType()): // We do not check for metadata as in the C++ implementation. + return false + case !validityBitmapEqual(left, right): + return false + } + return true +} + +func validityBitmapEqual(left, right arrow.Array) bool { + // TODO(alexandreyc): make it faster by comparing byte slices of the validity bitmap? + n := left.Len() + if n != right.Len() { + return false + } + for i := 0; i < n; i++ { + if left.IsNull(i) != right.IsNull(i) { + return false + } + } + return true +} + +func arrayApproxEqualFloat16(left, right *Float16, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f16(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualFloat32(left, right *Float32, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f32(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualFloat64(left, right *Float64, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !opt.f64(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualList(left, right *List, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualLargeList(left, right *LargeList, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualFixedSizeList(left, right *FixedSizeList, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualStruct(left, right *Struct, opt equalOption) bool { + for i, lf := range left.fields { + rf := 
right.fields[i] + if !arrayApproxEqual(lf, rf, opt) { + return false + } + } + return true +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go new file mode 100644 index 00000000..c8e12318 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go @@ -0,0 +1,737 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/encoded" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v12/internal/utils" +) + +// Concatenate creates a new arrow.Array which is the concatenation of the +// passed in arrays. Returns nil if an error is encountered. +// +// The passed in arrays still need to be released manually, and will not be +// released by this function. +func Concatenate(arrs []arrow.Array, mem memory.Allocator) (result arrow.Array, err error) { + if len(arrs) == 0 { + return nil, errors.New("array/concat: must pass at least one array") + } + + defer func() { + if pErr := recover(); pErr != nil { + switch e := pErr.(type) { + case error: + err = fmt.Errorf("arrow/concat: %w", e) + default: + err = fmt.Errorf("arrow/concat: %v", pErr) + } + } + }() + + // gather Data of inputs + data := make([]arrow.ArrayData, len(arrs)) + for i, ar := range arrs { + if !arrow.TypeEqual(ar.DataType(), arrs[0].DataType()) { + return nil, fmt.Errorf("arrays to be concatenated must be identically typed, but %s and %s were encountered", + arrs[0].DataType(), ar.DataType()) + } + data[i] = ar.Data() + } + + out, err := concat(data, mem) + if err != nil { + return nil, err + } + + defer out.Release() + return MakeFromData(out), nil +} + +// simple struct to hold ranges +type rng struct { + offset, len int +} + +// simple bitmap struct to reference a specific slice of a bitmap where the range +// offset and length are in bits +type bitmap struct { + data []byte + rng rng +} + +// gather up the bitmaps from the passed in data objects +func gatherBitmaps(data []arrow.ArrayData, idx int) []bitmap { + out := make([]bitmap, len(data)) + for i, d := range data { + if d.Buffers()[idx] != nil { + out[i].data = d.Buffers()[idx].Bytes() + } + out[i].rng.offset = d.Offset() + out[i].rng.len = d.Len() + } + return out +} + +// gatherFixedBuffers gathers up the buffer objects of the given index, specifically +// returning only the slices of the buffers which are relevant to the passed in arrays +// in case they are themselves slices of other arrays. 
nil buffers are ignored and not +// in the output slice. +func gatherFixedBuffers(data []arrow.ArrayData, idx, byteWidth int) []*memory.Buffer { + out := make([]*memory.Buffer, 0, len(data)) + for _, d := range data { + buf := d.Buffers()[idx] + if buf == nil { + continue + } + + out = append(out, memory.NewBufferBytes(buf.Bytes()[d.Offset()*byteWidth:(d.Offset()+d.Len())*byteWidth])) + } + return out +} + +// gatherBuffersFixedWidthType is like gatherFixedBuffers, but uses a datatype to determine the size +// to use for determining the byte slice rather than a passed in bytewidth. +func gatherBuffersFixedWidthType(data []arrow.ArrayData, idx int, fixed arrow.FixedWidthDataType) []*memory.Buffer { + return gatherFixedBuffers(data, idx, fixed.BitWidth()/8) +} + +// gatherBufferRanges requires that len(ranges) == len(data) and returns a list of buffers +// which represent the corresponding range of each buffer in the specified index of each +// data object. +func gatherBufferRanges(data []arrow.ArrayData, idx int, ranges []rng) []*memory.Buffer { + out := make([]*memory.Buffer, 0, len(data)) + for i, d := range data { + buf := d.Buffers()[idx] + if buf == nil { + debug.Assert(ranges[i].len == 0, "misaligned buffer value ranges") + continue + } + + out = append(out, memory.NewBufferBytes(buf.Bytes()[ranges[i].offset:ranges[i].offset+ranges[i].len])) + } + return out +} + +// gatherChildren gathers the children data objects for child of index idx for all of the data objects. +func gatherChildren(data []arrow.ArrayData, idx int) []arrow.ArrayData { + return gatherChildrenMultiplier(data, idx, 1) +} + +// gatherChildrenMultiplier gathers the full data slice of the underlying values from the children data objects +// such as the values data for a list array so that it can return a slice of the buffer for a given +// index into the children. +func gatherChildrenMultiplier(data []arrow.ArrayData, idx, multiplier int) []arrow.ArrayData { + out := make([]arrow.ArrayData, len(data)) + for i, d := range data { + out[i] = NewSliceData(d.Children()[idx], int64(d.Offset()*multiplier), int64(d.Offset()+d.Len())*int64(multiplier)) + } + return out +} + +// gatherChildrenRanges returns a slice of Data objects which each represent slices of the given ranges from the +// child in the specified index from each data object. +func gatherChildrenRanges(data []arrow.ArrayData, idx int, ranges []rng) []arrow.ArrayData { + debug.Assert(len(data) == len(ranges), "mismatched children ranges for concat") + out := make([]arrow.ArrayData, len(data)) + for i, d := range data { + out[i] = NewSliceData(d.Children()[idx], int64(ranges[i].offset), int64(ranges[i].offset+ranges[i].len)) + } + return out +} + +// creates a single contiguous buffer which contains the concatenation of all of the passed +// in buffer objects. 
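+//
+// Editorial note: a minimal usage sketch (not from the upstream sources) of
+// the public Concatenate entry point that ultimately drives these helpers;
+// a1 and a2 are hypothetical arrays of the same type:
+//
+//	out, err := array.Concatenate([]arrow.Array{a1, a2}, memory.NewGoAllocator())
+//	if err != nil {
+//		return err // type mismatch or allocation failure
+//	}
+//	defer out.Release() // a1 and a2 still need their own Release calls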
+func concatBuffers(bufs []*memory.Buffer, mem memory.Allocator) *memory.Buffer { + outLen := 0 + for _, b := range bufs { + outLen += b.Len() + } + out := memory.NewResizableBuffer(mem) + out.Resize(outLen) + + data := out.Bytes() + for _, b := range bufs { + copy(data, b.Bytes()) + data = data[b.Len():] + } + return out +} + +func handle32BitOffsets(outLen int, buffers []*memory.Buffer, out *memory.Buffer) (*memory.Buffer, []rng, error) { + dst := arrow.Int32Traits.CastFromBytes(out.Bytes()) + valuesRanges := make([]rng, len(buffers)) + nextOffset := int32(0) + nextElem := int(0) + for i, b := range buffers { + if b.Len() == 0 { + valuesRanges[i].offset = 0 + valuesRanges[i].len = 0 + continue + } + + // when we gather our buffers, we sliced off the last offset from the buffer + // so that we could count the lengths accurately + src := arrow.Int32Traits.CastFromBytes(b.Bytes()) + valuesRanges[i].offset = int(src[0]) + // expand our slice to see that final offset + expand := src[:len(src)+1] + // compute the length of this range by taking the final offset and subtracting where we started. + valuesRanges[i].len = int(expand[len(src)]) - valuesRanges[i].offset + + if nextOffset > math.MaxInt32-int32(valuesRanges[i].len) { + return nil, nil, errors.New("offset overflow while concatenating arrays") + } + + // adjust each offset by the difference between our last ending point and our starting point + adj := nextOffset - src[0] + for j, o := range src { + dst[nextElem+j] = adj + o + } + + // the next index for an element in the output buffer + nextElem += b.Len() / arrow.Int32SizeBytes + // update our offset counter to be the total current length of our output + nextOffset += int32(valuesRanges[i].len) + } + + // final offset should point to the end of the data + dst[outLen] = nextOffset + return out, valuesRanges, nil +} + +func unifyDictionaries(mem memory.Allocator, data []arrow.ArrayData, dt *arrow.DictionaryType) ([]*memory.Buffer, arrow.Array, error) { + unifier, err := NewDictionaryUnifier(mem, dt.ValueType) + if err != nil { + return nil, nil, err + } + defer unifier.Release() + + newLookup := make([]*memory.Buffer, len(data)) + for i, d := range data { + dictArr := MakeFromData(d.Dictionary()) + defer dictArr.Release() + newLookup[i], err = unifier.UnifyAndTranspose(dictArr) + if err != nil { + return nil, nil, err + } + } + + unified, err := unifier.GetResultWithIndexType(dt.IndexType) + if err != nil { + for _, b := range newLookup { + b.Release() + } + return nil, nil, err + } + return newLookup, unified, nil +} + +func concatDictIndices(mem memory.Allocator, data []arrow.ArrayData, idxType arrow.FixedWidthDataType, transpositions []*memory.Buffer) (out *memory.Buffer, err error) { + defer func() { + if err != nil && out != nil { + out.Release() + out = nil + } + }() + + idxWidth := idxType.BitWidth() / 8 + outLen := 0 + for i, d := range data { + outLen += d.Len() + defer transpositions[i].Release() + } + + out = memory.NewResizableBuffer(mem) + out.Resize(outLen * idxWidth) + + outData := out.Bytes() + for i, d := range data { + transposeMap := arrow.Int32Traits.CastFromBytes(transpositions[i].Bytes()) + src := d.Buffers()[1].Bytes() + if d.Buffers()[0] == nil { + if err = utils.TransposeIntsBuffers(idxType, idxType, src, outData, d.Offset(), 0, d.Len(), transposeMap); err != nil { + return + } + } else { + rdr := bitutils.NewBitRunReader(d.Buffers()[0].Bytes(), int64(d.Offset()), int64(d.Len())) + pos := 0 + for { + run := rdr.NextRun() + if run.Len == 0 { + break + } + + if 
run.Set { + err = utils.TransposeIntsBuffers(idxType, idxType, src, outData, d.Offset()+pos, pos, int(run.Len), transposeMap) + if err != nil { + return + } + } else { + memory.Set(outData[pos:pos+(int(run.Len)*idxWidth)], 0x00) + } + + pos += int(run.Len) + } + } + outData = outData[d.Len()*idxWidth:] + } + return +} + +func handle64BitOffsets(outLen int, buffers []*memory.Buffer, out *memory.Buffer) (*memory.Buffer, []rng, error) { + dst := arrow.Int64Traits.CastFromBytes(out.Bytes()) + valuesRanges := make([]rng, len(buffers)) + nextOffset := int64(0) + nextElem := int(0) + for i, b := range buffers { + if b.Len() == 0 { + valuesRanges[i].offset = 0 + valuesRanges[i].len = 0 + continue + } + + // when we gather our buffers, we sliced off the last offset from the buffer + // so that we could count the lengths accurately + src := arrow.Int64Traits.CastFromBytes(b.Bytes()) + valuesRanges[i].offset = int(src[0]) + // expand our slice to see that final offset + expand := src[:len(src)+1] + // compute the length of this range by taking the final offset and subtracting where we started. + valuesRanges[i].len = int(expand[len(src)]) - valuesRanges[i].offset + + if nextOffset > math.MaxInt64-int64(valuesRanges[i].len) { + return nil, nil, errors.New("offset overflow while concatenating arrays") + } + + // adjust each offset by the difference between our last ending point and our starting point + adj := nextOffset - src[0] + for j, o := range src { + dst[nextElem+j] = adj + o + } + + // the next index for an element in the output buffer + nextElem += b.Len() / arrow.Int64SizeBytes + // update our offset counter to be the total current length of our output + nextOffset += int64(valuesRanges[i].len) + } + + // final offset should point to the end of the data + dst[outLen] = nextOffset + return out, valuesRanges, nil +} + +// concatOffsets creates a single offset buffer which represents the concatenation of all of the +// offsets buffers, adjusting the offsets appropriately to their new relative locations. +// +// It also returns the list of ranges that need to be fetched for the corresponding value buffers +// to construct the final concatenated value buffer. +func concatOffsets(buffers []*memory.Buffer, byteWidth int, mem memory.Allocator) (*memory.Buffer, []rng, error) { + outLen := 0 + for _, b := range buffers { + outLen += b.Len() / byteWidth + } + + out := memory.NewResizableBuffer(mem) + out.Resize(byteWidth * (outLen + 1)) + + switch byteWidth { + case arrow.Int64SizeBytes: + return handle64BitOffsets(outLen, buffers, out) + default: + return handle32BitOffsets(outLen, buffers, out) + } +} + +// concat is the implementation for actually performing the concatenation of the arrow.ArrayData +// objects that we can call internally for nested types. 
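+//
+// A minimal usage sketch (editor's illustration; it assumes the exported
+// Concatenate wrapper defined earlier in this file, which checks that all
+// inputs share a data type before calling concat):
+//
+//	bldr := NewInt32Builder(memory.DefaultAllocator)
+//	defer bldr.Release()
+//	bldr.AppendValues([]int32{1, 2}, nil)
+//	left := bldr.NewInt32Array()
+//	defer left.Release()
+//	bldr.AppendValues([]int32{3}, nil)
+//	right := bldr.NewInt32Array()
+//	defer right.Release()
+//	joined, err := Concatenate([]arrow.Array{left, right}, memory.DefaultAllocator)
+//	// joined is [1 2 3] when err is nil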
+func concat(data []arrow.ArrayData, mem memory.Allocator) (arrow.ArrayData, error) { + out := &Data{refCount: 1, dtype: data[0].DataType(), nulls: 0} + for _, d := range data { + out.length += d.Len() + if out.nulls == UnknownNullCount || d.NullN() == UnknownNullCount { + out.nulls = UnknownNullCount + continue + } + out.nulls += d.NullN() + } + + out.buffers = make([]*memory.Buffer, len(data[0].Buffers())) + if out.nulls != 0 && out.dtype.ID() != arrow.NULL { + bm, err := concatBitmaps(gatherBitmaps(data, 0), mem) + if err != nil { + return nil, err + } + out.buffers[0] = bm + } + + dt := out.dtype + if dt.ID() == arrow.EXTENSION { + dt = dt.(arrow.ExtensionType).StorageType() + } + + switch dt := dt.(type) { + case *arrow.NullType: + case *arrow.BooleanType: + bm, err := concatBitmaps(gatherBitmaps(data, 1), mem) + if err != nil { + return nil, err + } + out.buffers[1] = bm + case *arrow.DictionaryType: + idxType := dt.IndexType.(arrow.FixedWidthDataType) + // two cases: all dictionaries are the same or we need to unify them + dictsSame := true + dict0 := MakeFromData(data[0].Dictionary()) + defer dict0.Release() + for _, d := range data { + dict := MakeFromData(d.Dictionary()) + if !Equal(dict0, dict) { + dict.Release() + dictsSame = false + break + } + dict.Release() + } + + indexBuffers := gatherBuffersFixedWidthType(data, 1, idxType) + if dictsSame { + out.dictionary = dict0.Data().(*Data) + out.dictionary.Retain() + out.buffers[1] = concatBuffers(indexBuffers, mem) + break + } + + indexLookup, unifiedDict, err := unifyDictionaries(mem, data, dt) + if err != nil { + return nil, err + } + defer unifiedDict.Release() + out.dictionary = unifiedDict.Data().(*Data) + out.dictionary.Retain() + + out.buffers[1], err = concatDictIndices(mem, data, idxType, indexLookup) + if err != nil { + return nil, err + } + case arrow.FixedWidthDataType: + out.buffers[1] = concatBuffers(gatherBuffersFixedWidthType(data, 1, dt), mem) + case arrow.BinaryDataType: + offsetWidth := dt.Layout().Buffers[1].ByteWidth + offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) + if err != nil { + return nil, err + } + out.buffers[2] = concatBuffers(gatherBufferRanges(data, 2, valueRanges), mem) + out.buffers[1] = offsetBuffer + case *arrow.ListType: + offsetWidth := dt.Layout().Buffers[1].ByteWidth + offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) + if err != nil { + return nil, err + } + childData := gatherChildrenRanges(data, 0, valueRanges) + for _, c := range childData { + defer c.Release() + } + + out.buffers[1] = offsetBuffer + out.childData = make([]arrow.ArrayData, 1) + out.childData[0], err = concat(childData, mem) + if err != nil { + return nil, err + } + case *arrow.LargeListType: + offsetWidth := dt.Layout().Buffers[1].ByteWidth + offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) + if err != nil { + return nil, err + } + childData := gatherChildrenRanges(data, 0, valueRanges) + for _, c := range childData { + defer c.Release() + } + + out.buffers[1] = offsetBuffer + out.childData = make([]arrow.ArrayData, 1) + out.childData[0], err = concat(childData, mem) + if err != nil { + return nil, err + } + case *arrow.FixedSizeListType: + childData := gatherChildrenMultiplier(data, 0, int(dt.Len())) + for _, c := range childData { + defer c.Release() + } + + children, err := concat(childData, mem) + if err != nil { + return nil, err + } + 
out.childData = []arrow.ArrayData{children} + case *arrow.StructType: + out.childData = make([]arrow.ArrayData, len(dt.Fields())) + for i := range dt.Fields() { + children := gatherChildren(data, i) + for _, c := range children { + defer c.Release() + } + + childData, err := concat(children, mem) + if err != nil { + return nil, err + } + out.childData[i] = childData + } + case *arrow.MapType: + offsetWidth := dt.Layout().Buffers[1].ByteWidth + offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) + if err != nil { + return nil, err + } + childData := gatherChildrenRanges(data, 0, valueRanges) + for _, c := range childData { + defer c.Release() + } + + out.buffers[1] = offsetBuffer + out.childData = make([]arrow.ArrayData, 1) + out.childData[0], err = concat(childData, mem) + if err != nil { + return nil, err + } + case *arrow.RunEndEncodedType: + physicalLength, overflow := int(0), false + // we can't use gatherChildren because the Offset and Len of + // data doesn't correspond to the physical length or offset + runs := make([]arrow.ArrayData, len(data)) + values := make([]arrow.ArrayData, len(data)) + for i, d := range data { + plen := encoded.GetPhysicalLength(d) + off := encoded.FindPhysicalOffset(d) + + runs[i] = NewSliceData(d.Children()[0], int64(off), int64(off+plen)) + defer runs[i].Release() + values[i] = NewSliceData(d.Children()[1], int64(off), int64(off+plen)) + defer values[i].Release() + + physicalLength, overflow = addOvf(physicalLength, plen) + if overflow { + return nil, fmt.Errorf("%w: run end encoded array length must fit into a 32-bit signed integer", + arrow.ErrInvalid) + } + } + + runEndsByteWidth := runs[0].DataType().(arrow.FixedWidthDataType).Bytes() + runEndsBuffers := gatherFixedBuffers(runs, 1, runEndsByteWidth) + outRunEndsLen := physicalLength * runEndsByteWidth + outRunEndsBuf := memory.NewResizableBuffer(mem) + outRunEndsBuf.Resize(outRunEndsLen) + defer outRunEndsBuf.Release() + + if err := updateRunEnds(runEndsByteWidth, data, runEndsBuffers, outRunEndsBuf); err != nil { + return nil, err + } + + out.childData = make([]arrow.ArrayData, 2) + out.childData[0] = NewData(data[0].Children()[0].DataType(), int(physicalLength), + []*memory.Buffer{nil, outRunEndsBuf}, nil, 0, 0) + + var err error + out.childData[1], err = concat(values, mem) + if err != nil { + out.childData[0].Release() + return nil, err + } + + default: + return nil, fmt.Errorf("concatenate not implemented for type %s", dt) + } + + return out, nil +} + +// check overflow in the addition, taken from bits.Add but adapted for signed integers +// rather than unsigned integers. bits.UintSize will be either 32 or 64 based on +// whether our architecture is 32 bit or 64. The operation is the same for both cases, +// the only difference is how much we need to shift by 30 for 32 bit and 62 for 64 bit. +// Thus, bits.UintSize - 2 is how much we shift right by to check if we had an overflow +// in the signed addition. 
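+// For example (editor's note): addOvf(math.MaxInt64, 1) on a 64-bit platform
+// wraps to math.MinInt64; the carry into the sign bit sets bit 62 of the
+// carry word, so the shift yields 1 and the overflow is reported.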
+// +// First return is the result of the sum, the second return is true if there was an overflow +func addOvf(x, y int) (int, bool) { + sum := x + y + return sum, ((x&y)|((x|y)&^sum))>>(bits.UintSize-2) == 1 +} + +// concatenate bitmaps together and return a buffer with the combined bitmaps +func concatBitmaps(bitmaps []bitmap, mem memory.Allocator) (*memory.Buffer, error) { + var ( + outlen int + overflow bool + ) + + for _, bm := range bitmaps { + if outlen, overflow = addOvf(outlen, bm.rng.len); overflow { + return nil, errors.New("length overflow when concatenating arrays") + } + } + + out := memory.NewResizableBuffer(mem) + out.Resize(int(bitutil.BytesForBits(int64(outlen)))) + dst := out.Bytes() + + offset := 0 + for _, bm := range bitmaps { + if bm.data == nil { // if the bitmap is nil, that implies that the value is true for all elements + bitutil.SetBitsTo(out.Bytes(), int64(offset), int64(bm.rng.len), true) + } else { + bitutil.CopyBitmap(bm.data, bm.rng.offset, bm.rng.len, dst, offset) + } + offset += bm.rng.len + } + return out, nil +} + +func updateRunEnds(byteWidth int, inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, outputBuffer *memory.Buffer) error { + switch byteWidth { + case 2: + out := arrow.Int16Traits.CastFromBytes(outputBuffer.Bytes()) + return updateRunsInt16(inputData, inputBuffers, out) + case 4: + out := arrow.Int32Traits.CastFromBytes(outputBuffer.Bytes()) + return updateRunsInt32(inputData, inputBuffers, out) + case 8: + out := arrow.Int64Traits.CastFromBytes(outputBuffer.Bytes()) + return updateRunsInt64(inputData, inputBuffers, out) + } + return fmt.Errorf("%w: invalid dataType for RLE runEnds", arrow.ErrInvalid) +} + +func updateRunsInt16(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int16) error { + // for now we will not attempt to optimize by checking if we + // can fold the end and beginning of each array we're concatenating + // into a single run + pos := 0 + for i, buf := range inputBuffers { + if buf.Len() == 0 { + continue + } + src := arrow.Int16Traits.CastFromBytes(buf.Bytes()) + if pos == 0 { + pos += copy(output, src) + continue + } + + lastEnd := output[pos-1] + // we can check the last runEnd in the src and add it to the + // last value that we're adjusting them all by to see if we + // are going to overflow + if int64(lastEnd)+int64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt16 { + return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid) + } + + // adjust all of the run ends by first normalizing them (e - data[i].offset) + // then adding the previous value we ended on. Since the offset + // is a logical length offset it should be accurate to just subtract + // it from each value. 
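+		// For example (editor's note): if the output so far ends at run end 7
+		// and the next input has run ends [2, 5] at offset 0, they are written
+		// out as [9, 12].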
+ for j, e := range src { + output[pos+j] = lastEnd + int16(int(e)-inputData[i].Offset()) + } + pos += len(src) + } + return nil +} + +func updateRunsInt32(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int32) error { + // for now we will not attempt to optimize by checking if we + // can fold the end and beginning of each array we're concatenating + // into a single run + pos := 0 + for i, buf := range inputBuffers { + if buf.Len() == 0 { + continue + } + src := arrow.Int32Traits.CastFromBytes(buf.Bytes()) + if pos == 0 { + pos += copy(output, src) + continue + } + + lastEnd := output[pos-1] + // we can check the last runEnd in the src and add it to the + // last value that we're adjusting them all by to see if we + // are going to overflow + if int64(lastEnd)+int64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt32 { + return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid) + } + + // adjust all of the run ends by first normalizing them (e - data[i].offset) + // then adding the previous value we ended on. Since the offset + // is a logical length offset it should be accurate to just subtract + // it from each value. + for j, e := range src { + output[pos+j] = lastEnd + int32(int(e)-inputData[i].Offset()) + } + pos += len(src) + } + return nil +} + +func updateRunsInt64(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int64) error { + // for now we will not attempt to optimize by checking if we + // can fold the end and beginning of each array we're concatenating + // into a single run + pos := 0 + for i, buf := range inputBuffers { + if buf.Len() == 0 { + continue + } + src := arrow.Int64Traits.CastFromBytes(buf.Bytes()) + if pos == 0 { + pos += copy(output, src) + continue + } + + lastEnd := output[pos-1] + // we can check the last runEnd in the src and add it to the + // last value that we're adjusting them all by to see if we + // are going to overflow + if uint64(lastEnd)+uint64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt64 { + return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid) + } + + // adjust all of the run ends by first normalizing them (e - data[i].offset) + // then adding the previous value we ended on. Since the offset + // is a logical length offset it should be accurate to just subtract + // it from each value. + for j, e := range src { + output[pos+j] = lastEnd + e - int64(inputData[i].Offset()) + } + pos += len(src) + } + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/data.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/data.go new file mode 100644 index 00000000..55a84d8c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/data.go @@ -0,0 +1,250 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "hash/maphash" + "math/bits" + "sync/atomic" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// Data represents the memory and metadata of an Arrow array. +type Data struct { + refCount int64 + dtype arrow.DataType + nulls int + offset int + length int + + // for dictionary arrays: buffers will be the null validity bitmap and the indexes that reference + // values in the dictionary member. childData would be empty in a dictionary array + buffers []*memory.Buffer // TODO(sgc): should this be an interface? + childData []arrow.ArrayData // TODO(sgc): managed by ListArray, StructArray and UnionArray types + dictionary *Data // only populated for dictionary arrays +} + +// NewData creates a new Data. +func NewData(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []arrow.ArrayData, nulls, offset int) *Data { + for _, b := range buffers { + if b != nil { + b.Retain() + } + } + + for _, child := range childData { + if child != nil { + child.Retain() + } + } + + return &Data{ + refCount: 1, + dtype: dtype, + nulls: nulls, + length: length, + offset: offset, + buffers: buffers, + childData: childData, + } +} + +// NewDataWithDictionary creates a new data object, but also sets the provided dictionary into the data if it's not nil +func NewDataWithDictionary(dtype arrow.DataType, length int, buffers []*memory.Buffer, nulls, offset int, dict *Data) *Data { + data := NewData(dtype, length, buffers, nil, nulls, offset) + if dict != nil { + dict.Retain() + } + data.dictionary = dict + return data +} + +func (d *Data) Copy() *Data { + // don't pass the slices directly, otherwise it retains the connection + // we need to make new slices and populate them with the same pointers + bufs := make([]*memory.Buffer, len(d.buffers)) + copy(bufs, d.buffers) + children := make([]arrow.ArrayData, len(d.childData)) + copy(children, d.childData) + + data := NewData(d.dtype, d.length, bufs, children, d.nulls, d.offset) + data.SetDictionary(d.dictionary) + return data +} + +// Reset sets the Data for re-use. +func (d *Data) Reset(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []arrow.ArrayData, nulls, offset int) { + // Retain new buffers before releasing existing buffers in-case they're the same ones to prevent accidental premature + // release. + for _, b := range buffers { + if b != nil { + b.Retain() + } + } + for _, b := range d.buffers { + if b != nil { + b.Release() + } + } + d.buffers = buffers + + // Retain new children data before releasing existing children data in-case they're the same ones to prevent accidental + // premature release. + for _, d := range childData { + if d != nil { + d.Retain() + } + } + for _, d := range d.childData { + if d != nil { + d.Release() + } + } + d.childData = childData + + d.dtype = dtype + d.length = length + d.nulls = nulls + d.offset = offset +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (d *Data) Retain() { + atomic.AddInt64(&d.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
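+//
+// A typical pattern (editor's note) is to pair every Retain with a deferred
+// Release when handing the Data to another component:
+//
+//	d.Retain()
+//	defer d.Release()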
+func (d *Data) Release() { + debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases") + + if atomic.AddInt64(&d.refCount, -1) == 0 { + for _, b := range d.buffers { + if b != nil { + b.Release() + } + } + + for _, b := range d.childData { + b.Release() + } + + if d.dictionary != nil { + d.dictionary.Release() + } + d.dictionary, d.buffers, d.childData = nil, nil, nil + } +} + +// DataType returns the DataType of the data. +func (d *Data) DataType() arrow.DataType { return d.dtype } + +func (d *Data) SetNullN(n int) { d.nulls = n } + +// NullN returns the number of nulls. +func (d *Data) NullN() int { return d.nulls } + +// Len returns the length. +func (d *Data) Len() int { return d.length } + +// Offset returns the offset. +func (d *Data) Offset() int { return d.offset } + +// Buffers returns the buffers. +func (d *Data) Buffers() []*memory.Buffer { return d.buffers } + +func (d *Data) Children() []arrow.ArrayData { return d.childData } + +// Dictionary returns the ArrayData object for the dictionary member, or nil +func (d *Data) Dictionary() arrow.ArrayData { return d.dictionary } + +// SetDictionary allows replacing the dictionary for this particular Data object +func (d *Data) SetDictionary(dict arrow.ArrayData) { + if d.dictionary != nil { + d.dictionary.Release() + d.dictionary = nil + } + if dict.(*Data) != nil { + dict.Retain() + d.dictionary = dict.(*Data) + } +} + +// NewSliceData returns a new slice that shares backing data with the input. +// The returned Data slice starts at i and extends j-i elements, such as: +// slice := data[i:j] +// The returned value must be Release'd after use. +// +// NewSliceData panics if the slice is outside the valid range of the input Data. +// NewSliceData panics if j < i. +func NewSliceData(data arrow.ArrayData, i, j int64) arrow.ArrayData { + if j > int64(data.Len()) || i > j || data.Offset()+int(i) > data.Offset()+data.Len() { + panic("arrow/array: index out of range") + } + + for _, b := range data.Buffers() { + if b != nil { + b.Retain() + } + } + + for _, child := range data.Children() { + if child != nil { + child.Retain() + } + } + + if data.(*Data).dictionary != nil { + data.(*Data).dictionary.Retain() + } + + o := &Data{ + refCount: 1, + dtype: data.DataType(), + nulls: UnknownNullCount, + length: int(j - i), + offset: data.Offset() + int(i), + buffers: data.Buffers(), + childData: data.Children(), + dictionary: data.(*Data).dictionary, + } + + if data.NullN() == 0 { + o.nulls = 0 + } + + return o +} + +func Hash(h *maphash.Hash, data arrow.ArrayData) { + a := data.(*Data) + + h.Write((*[bits.UintSize / 8]byte)(unsafe.Pointer(&a.length))[:]) + h.Write((*[bits.UintSize / 8]byte)(unsafe.Pointer(&a.length))[:]) + if len(a.buffers) > 0 && a.buffers[0] != nil { + h.Write(a.buffers[0].Bytes()) + } + for _, c := range a.childData { + Hash(h, c) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go new file mode 100644 index 00000000..bc2db26f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go @@ -0,0 +1,353 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "math" + "math/big" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/decimal128" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// A type which represents an immutable sequence of 128-bit decimal values. +type Decimal128 struct { + array + + values []decimal128.Num +} + +func NewDecimal128Data(data arrow.ArrayData) *Decimal128 { + a := &Decimal128{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *Decimal128) Value(i int) decimal128.Num { return a.values[i] } +func (a *Decimal128) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } else { + return a.GetOneForMarshal(i).(string) + } +} + +func (a *Decimal128) Values() []decimal128.Num { return a.values } + +func (a *Decimal128) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Decimal128) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Decimal128Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Decimal128) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + typ := a.DataType().(*arrow.Decimal128Type) + f := (&big.Float{}).SetInt(a.Value(i).BigInt()) + f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale)))) + return f.Text('g', int(typ.Precision)) +} + +// ["1.23", ] +func (a *Decimal128) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualDecimal128(left, right *Decimal128) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type Decimal128Builder struct { + builder + + dtype *arrow.Decimal128Type + data *memory.Buffer + rawData []decimal128.Num +} + +func NewDecimal128Builder(mem memory.Allocator, dtype *arrow.Decimal128Type) *Decimal128Builder { + return &Decimal128Builder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + } +} + +func (b *Decimal128Builder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Decimal128Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Decimal128Builder) Append(v decimal128.Num) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Decimal128Builder) UnsafeAppend(v decimal128.Num) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+func (b *Decimal128Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Decimal128Builder) AppendEmptyValue() {
+	b.Append(decimal128.Num{})
+}
+
+func (b *Decimal128Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Decimal128Builder) AppendValues(v []decimal128.Num, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	if len(v) > 0 {
+		arrow.Decimal128Traits.Copy(b.rawData[b.length:], v)
+	}
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Decimal128Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Decimal128Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Decimal128Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *Decimal128Builder) Resize(n int) {
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Decimal128Traits.BytesRequired(n))
+		b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
+// so it can be used to build a new array.
+func (b *Decimal128Builder) NewArray() arrow.Array {
+	return b.NewDecimal128Array()
+}
+
+// NewDecimal128Array creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
+// so it can be used to build a new array.
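+//
+// A minimal usage sketch (editor's illustration):
+//
+//	b := NewDecimal128Builder(memory.DefaultAllocator,
+//		&arrow.Decimal128Type{Precision: 10, Scale: 2})
+//	defer b.Release()
+//	b.Append(decimal128.FromI64(12345)) // represents 123.45 at scale 2
+//	arr := b.NewDecimal128Array()
+//	defer arr.Release()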
+func (b *Decimal128Builder) NewDecimal128Array() (a *Decimal128) { + data := b.newData() + a = NewDecimal128Data(data) + data.Release() + return +} + +func (b *Decimal128Builder) newData() (data *Data) { + bytesRequired := arrow.Decimal128Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Decimal128Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := decimal128.FromString(s, b.dtype.Precision, b.dtype.Scale) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + +func (b *Decimal128Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case float64: + val, err := decimal128.FromFloat64(v, b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(val) + case string: + val, err := decimal128.FromString(v, b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(val) + case json.Number: + val, err := decimal128.FromString(v.String(), b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(val) + case nil: + b.AppendNull() + return nil + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(decimal128.Num{}), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Decimal128Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +// UnmarshalJSON will add the unmarshalled values to this builder. +// +// If the values are strings, they will get parsed with big.ParseFloat using +// a rounding mode of big.ToNearestAway currently. +func (b *Decimal128Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("decimal128 builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Decimal128)(nil) + _ Builder = (*Decimal128Builder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go new file mode 100644 index 00000000..0b9cfed1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go @@ -0,0 +1,352 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "math" + "math/big" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/decimal256" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// Decimal256 is a type that represents an immutable sequence of 256-bit decimal values. +type Decimal256 struct { + array + + values []decimal256.Num +} + +func NewDecimal256Data(data arrow.ArrayData) *Decimal256 { + a := &Decimal256{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *Decimal256) Value(i int) decimal256.Num { return a.values[i] } +func (a *Decimal256) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } else { + return a.GetOneForMarshal(i).(string) + } +} + +func (a *Decimal256) Values() []decimal256.Num { return a.values } + +func (a *Decimal256) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Decimal256) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Decimal256Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Decimal256) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + typ := a.DataType().(*arrow.Decimal256Type) + f := (&big.Float{}).SetInt(a.Value(i).BigInt()) + f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale)))) + return f.Text('g', int(typ.Precision)) +} + +func (a *Decimal256) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualDecimal256(left, right *Decimal256) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type Decimal256Builder struct { + builder + + dtype *arrow.Decimal256Type + data *memory.Buffer + rawData []decimal256.Num +} + +func NewDecimal256Builder(mem memory.Allocator, dtype *arrow.Decimal256Type) *Decimal256Builder { + return &Decimal256Builder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Decimal256Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Decimal256Builder) Append(v decimal256.Num) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Decimal256Builder) UnsafeAppend(v decimal256.Num) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+func (b *Decimal256Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Decimal256Builder) AppendEmptyValue() {
+	b.Append(decimal256.Num{})
+}
+
+func (b *Decimal256Builder) Type() arrow.DataType { return b.dtype }
+
+func (b *Decimal256Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Decimal256Builder) AppendValues(v []decimal256.Num, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("arrow/array: len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	if len(v) > 0 {
+		arrow.Decimal256Traits.Copy(b.rawData[b.length:], v)
+	}
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Decimal256Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Decimal256Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Decimal256Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *Decimal256Builder) Resize(n int) {
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Decimal256Traits.BytesRequired(n))
+		b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder
+// so it can be used to build a new array.
+func (b *Decimal256Builder) NewArray() arrow.Array {
+	return b.NewDecimal256Array()
+}
+
+// NewDecimal256Array creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder
+// so it can be used to build a new array.
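+//
+// For illustration (editor's sketch), mirroring the Decimal128 builder:
+//
+//	b := NewDecimal256Builder(memory.DefaultAllocator,
+//		&arrow.Decimal256Type{Precision: 40, Scale: 4})
+//	defer b.Release()
+//	_ = b.AppendValueFromString("123.4567") // parsed at precision 40, scale 4
+//	arr := b.NewDecimal256Array()
+//	defer arr.Release()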
+func (b *Decimal256Builder) NewDecimal256Array() (a *Decimal256) { + data := b.newData() + a = NewDecimal256Data(data) + data.Release() + return +} + +func (b *Decimal256Builder) newData() (data *Data) { + bytesRequired := arrow.Decimal256Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Decimal256Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := decimal256.FromString(s, b.dtype.Precision, b.dtype.Scale) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + +func (b *Decimal256Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case float64: + val, err := decimal256.FromFloat64(v, b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(val) + case string: + out, err := decimal256.FromString(v, b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(out) + case json.Number: + out, err := decimal256.FromString(v.String(), b.dtype.Precision, b.dtype.Scale) + if err != nil { + return err + } + b.Append(out) + case nil: + b.AppendNull() + return nil + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(decimal256.Num{}), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Decimal256Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +// UnmarshalJSON will add the unmarshalled values to this builder. +// +// If the values are strings, they will get parsed with big.ParseFloat using +// a rounding mode of big.ToNearestAway currently. +func (b *Decimal256Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("arrow/array: decimal256 builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Decimal256)(nil) + _ Builder = (*Decimal256Builder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go new file mode 100644 index 00000000..6ecda0b9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go @@ -0,0 +1,1731 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/decimal128"
+	"github.com/apache/arrow/go/v12/arrow/float16"
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/apache/arrow/go/v12/internal/hashing"
+	"github.com/apache/arrow/go/v12/internal/utils"
+	"github.com/goccy/go-json"
+)
+
+// Dictionary represents the type for dictionary-encoded data with a data
+// dependent dictionary.
+//
+// A dictionary array contains an array of non-negative integers (the "dictionary
+// indices") along with a data type containing a "dictionary" corresponding to
+// the distinct values represented in the data.
+//
+// For example, the array:
+//
+// ["foo", "bar", "foo", "bar", "foo", "bar"]
+//
+// with dictionary ["bar", "foo"], would have the representation of:
+//
+// indices: [1, 0, 1, 0, 1, 0]
+// dictionary: ["bar", "foo"]
+//
+// The indices in principle may be any integer type.
+type Dictionary struct {
+	array
+
+	indices arrow.Array
+	dict    arrow.Array
+}
+
+// NewDictionaryArray constructs a dictionary array with the provided indices
+// and dictionary using the given type.
+func NewDictionaryArray(typ arrow.DataType, indices, dict arrow.Array) *Dictionary {
+	a := &Dictionary{}
+	a.array.refCount = 1
+	dictdata := NewData(typ, indices.Len(), indices.Data().Buffers(), indices.Data().Children(), indices.NullN(), indices.Data().Offset())
+	dictdata.dictionary = dict.Data().(*Data)
+	dict.Data().Retain()
+
+	defer dictdata.Release()
+	a.setData(dictdata)
+	return a
+}
+
+// checkIndexBounds returns an error if any value in the provided integer
+// array data is >= the passed upperlimit or < 0; otherwise it returns nil.
+func checkIndexBounds(indices *Data, upperlimit uint64) error {
+	if indices.length == 0 {
+		return nil
+	}
+
+	var maxval uint64
+	switch indices.dtype.ID() {
+	case arrow.UINT8:
+		maxval = math.MaxUint8
+	case arrow.UINT16:
+		maxval = math.MaxUint16
+	case arrow.UINT32:
+		maxval = math.MaxUint32
+	case arrow.UINT64:
+		maxval = math.MaxUint64
+	}
+	// for unsigned integers, if the values array is larger than the maximum
+	// index value (especially for UINT8/UINT16), then there's no need to
+	// boundscheck. for signed integers we still need to bounds check
+	// because a value could be < 0.
+	isSigned := maxval == 0
+	if !isSigned && upperlimit > maxval {
+		return nil
+	}
+
+	start := indices.offset
+	end := indices.offset + indices.length
+
+	// TODO(ARROW-15950): lift BitSetRunReader from parquet to utils
+	// and use it here for performance improvement.
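+	// For example (editor's note): int8 indices [0, 3, -1] fail against a
+	// dictionary of length 4 because of the negative index, while unsigned
+	// indices only need to be checked against the upper limit.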
+
+	switch indices.dtype.ID() {
+	case arrow.INT8:
+		data := arrow.Int8Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt8(data[start:end])
+		if min < 0 || max >= int8(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT8:
+		data := arrow.Uint8Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint8(data[start:end])
+		if max >= uint8(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT16:
+		data := arrow.Int16Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt16(data[start:end])
+		if min < 0 || max >= int16(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT16:
+		data := arrow.Uint16Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint16(data[start:end])
+		if max >= uint16(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT32:
+		data := arrow.Int32Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt32(data[start:end])
+		if min < 0 || max >= int32(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT32:
+		data := arrow.Uint32Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint32(data[start:end])
+		if max >= uint32(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT64:
+		data := arrow.Int64Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt64(data[start:end])
+		if min < 0 || max >= int64(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT64:
+		data := arrow.Uint64Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint64(data[indices.offset : indices.offset+indices.length])
+		if max >= upperlimit {
+			return fmt.Errorf("contains out of bounds value: max: %d", max)
+		}
+	default:
+		return fmt.Errorf("invalid type for bounds checking: %T", indices.dtype)
+	}
+
+	return nil
+}
+
+// NewValidatedDictionaryArray constructs a dictionary array from the provided indices
+// and dictionary arrays, while also performing validation checks to ensure correctness,
+// such as the bounds checking that is usually skipped for performance.
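+//
+// A usage sketch (editor's illustration; indices and dict stand in for
+// previously built arrays of the matching index and value types):
+//
+//	dt := &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Int32,
+//		ValueType: arrow.BinaryTypes.String}
+//	arr, err := NewValidatedDictionaryArray(dt, indices, dict)
+//	// err is non-nil if any index is negative or >= dict.Len()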
+func NewValidatedDictionaryArray(typ *arrow.DictionaryType, indices, dict arrow.Array) (*Dictionary, error) {
+	if indices.DataType().ID() != typ.IndexType.ID() {
+		return nil, fmt.Errorf("dictionary type index (%T) does not match indices array type (%T)", typ.IndexType, indices.DataType())
+	}
+
+	if !arrow.TypeEqual(typ.ValueType, dict.DataType()) {
+		return nil, fmt.Errorf("dictionary value type (%T) does not match dict array type (%T)", typ.ValueType, dict.DataType())
+	}
+
+	if err := checkIndexBounds(indices.Data().(*Data), uint64(dict.Len())); err != nil {
+		return nil, err
+	}
+
+	return NewDictionaryArray(typ, indices, dict), nil
+}
+
+// NewDictionaryData creates a strongly typed Dictionary array from
+// an ArrayData object with a datatype of arrow.Dictionary and a dictionary.
+func NewDictionaryData(data arrow.ArrayData) *Dictionary {
+	a := &Dictionary{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+func (d *Dictionary) Retain() {
+	atomic.AddInt64(&d.refCount, 1)
+}
+
+func (d *Dictionary) Release() {
+	debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&d.refCount, -1) == 0 {
+		d.data.Release()
+		d.data, d.nullBitmapBytes = nil, nil
+		d.indices.Release()
+		d.indices = nil
+		if d.dict != nil {
+			d.dict.Release()
+			d.dict = nil
+		}
+	}
+}
+
+func (d *Dictionary) setData(data *Data) {
+	d.array.setData(data)
+
+	dictType := data.dtype.(*arrow.DictionaryType)
+	if data.dictionary == nil {
+		if data.length > 0 {
+			panic("arrow/array: no dictionary set in Data for Dictionary array")
+		}
+	} else {
+		debug.Assert(arrow.TypeEqual(dictType.ValueType, data.dictionary.DataType()), "mismatched dictionary value types")
+	}
+
+	indexData := NewData(dictType.IndexType, data.length, data.buffers, data.childData, data.nulls, data.offset)
+	defer indexData.Release()
+	d.indices = MakeFromData(indexData)
+}
+
+// Dictionary returns the values array that makes up the dictionary for this
+// array.
+func (d *Dictionary) Dictionary() arrow.Array {
+	if d.dict == nil {
+		d.dict = MakeFromData(d.data.dictionary)
+	}
+	return d.dict
+}
+
+// Indices returns the underlying array of indices as its own array.
+func (d *Dictionary) Indices() arrow.Array {
+	return d.indices
+}
+
+// CanCompareIndices returns true if the dictionary arrays can be compared
+// without having to unify the dictionaries themselves first.
+// This means that the index types are equal too.
+func (d *Dictionary) CanCompareIndices(other *Dictionary) bool {
+	if !arrow.TypeEqual(d.indices.DataType(), other.indices.DataType()) {
+		return false
+	}
+
+	minlen := int64(min(d.data.dictionary.length, other.data.dictionary.length))
+	return ArraySliceEqual(d.Dictionary(), 0, minlen, other.Dictionary(), 0, minlen)
+}
+
+func (d *Dictionary) ValueStr(i int) string {
+	return d.Dictionary().ValueStr(d.GetValueIndex(i))
+}
+
+func (d *Dictionary) String() string {
+	return fmt.Sprintf("{ dictionary: %v\n indices: %v }", d.Dictionary(), d.Indices())
+}
+
+// GetValueIndex returns the dictionary index for the value at index i of the array.
+// The actual value can be retrieved by using d.Dictionary().(valuetype).Value(d.GetValueIndex(i))
+func (d *Dictionary) GetValueIndex(i int) int {
+	indiceData := d.data.buffers[1].Bytes()
+	// we know the value is non-negative per the spec, so
+	// we can use the unsigned value regardless.
+ switch d.indices.DataType().ID() { + case arrow.UINT8, arrow.INT8: + return int(uint8(indiceData[d.data.offset+i])) + case arrow.UINT16, arrow.INT16: + return int(arrow.Uint16Traits.CastFromBytes(indiceData)[d.data.offset+i]) + case arrow.UINT32, arrow.INT32: + idx := arrow.Uint32Traits.CastFromBytes(indiceData)[d.data.offset+i] + debug.Assert(bits.UintSize == 64 || idx <= math.MaxInt32, "arrow/dictionary: truncation of index value") + return int(idx) + case arrow.UINT64, arrow.INT64: + idx := arrow.Uint64Traits.CastFromBytes(indiceData)[d.data.offset+i] + debug.Assert((bits.UintSize == 32 && idx <= math.MaxInt32) || (bits.UintSize == 64 && idx <= math.MaxInt64), "arrow/dictionary: truncation of index value") + return int(idx) + } + debug.Assert(false, "unreachable dictionary index") + return -1 +} + +func (d *Dictionary) GetOneForMarshal(i int) interface{} { + if d.IsNull(i) { + return nil + } + vidx := d.GetValueIndex(i) + return d.Dictionary().GetOneForMarshal(vidx) +} + +func (d *Dictionary) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, d.Len()) + for i := 0; i < d.Len(); i++ { + vals[i] = d.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualDict(l, r *Dictionary) bool { + return ArrayEqual(l.Dictionary(), r.Dictionary()) && ArrayEqual(l.indices, r.indices) +} + +func arrayApproxEqualDict(l, r *Dictionary, opt equalOption) bool { + return arrayApproxEqual(l.Dictionary(), r.Dictionary(), opt) && arrayApproxEqual(l.indices, r.indices, opt) +} + +// helper for building the properly typed indices of the dictionary builder +type indexBuilder struct { + Builder + Append func(int) +} + +func createIndexBuilder(mem memory.Allocator, dt arrow.FixedWidthDataType) (ret indexBuilder, err error) { + ret = indexBuilder{Builder: NewBuilder(mem, dt)} + switch dt.ID() { + case arrow.INT8: + ret.Append = func(idx int) { + ret.Builder.(*Int8Builder).Append(int8(idx)) + } + case arrow.UINT8: + ret.Append = func(idx int) { + ret.Builder.(*Uint8Builder).Append(uint8(idx)) + } + case arrow.INT16: + ret.Append = func(idx int) { + ret.Builder.(*Int16Builder).Append(int16(idx)) + } + case arrow.UINT16: + ret.Append = func(idx int) { + ret.Builder.(*Uint16Builder).Append(uint16(idx)) + } + case arrow.INT32: + ret.Append = func(idx int) { + ret.Builder.(*Int32Builder).Append(int32(idx)) + } + case arrow.UINT32: + ret.Append = func(idx int) { + ret.Builder.(*Uint32Builder).Append(uint32(idx)) + } + case arrow.INT64: + ret.Append = func(idx int) { + ret.Builder.(*Int64Builder).Append(int64(idx)) + } + case arrow.UINT64: + ret.Append = func(idx int) { + ret.Builder.(*Uint64Builder).Append(uint64(idx)) + } + default: + debug.Assert(false, "dictionary index type must be integral") + err = fmt.Errorf("dictionary index type must be integral, not %s", dt) + } + + return +} + +// helper function to construct an appropriately typed memo table based on +// the value type for the dictionary +func createMemoTable(mem memory.Allocator, dt arrow.DataType) (ret hashing.MemoTable, err error) { + switch dt.ID() { + case arrow.INT8: + ret = hashing.NewInt8MemoTable(0) + case arrow.UINT8: + ret = hashing.NewUint8MemoTable(0) + case arrow.INT16: + ret = hashing.NewInt16MemoTable(0) + case arrow.UINT16: + ret = hashing.NewUint16MemoTable(0) + case arrow.INT32: + ret = hashing.NewInt32MemoTable(0) + case arrow.UINT32: + ret = hashing.NewUint32MemoTable(0) + case arrow.INT64: + ret = hashing.NewInt64MemoTable(0) + case arrow.UINT64: + ret = hashing.NewUint64MemoTable(0) + case arrow.DURATION, 
arrow.TIMESTAMP, arrow.DATE64, arrow.TIME64: + ret = hashing.NewInt64MemoTable(0) + case arrow.TIME32, arrow.DATE32, arrow.INTERVAL_MONTHS: + ret = hashing.NewInt32MemoTable(0) + case arrow.FLOAT16: + ret = hashing.NewUint16MemoTable(0) + case arrow.FLOAT32: + ret = hashing.NewFloat32MemoTable(0) + case arrow.FLOAT64: + ret = hashing.NewFloat64MemoTable(0) + case arrow.BINARY, arrow.FIXED_SIZE_BINARY, arrow.DECIMAL128, arrow.DECIMAL256, arrow.INTERVAL_DAY_TIME, arrow.INTERVAL_MONTH_DAY_NANO: + ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.Binary)) + case arrow.STRING: + ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.String)) + case arrow.NULL: + default: + err = fmt.Errorf("unimplemented dictionary value type, %s", dt) + } + + return +} + +type DictionaryBuilder interface { + Builder + + NewDictionaryArray() *Dictionary + NewDelta() (indices, delta arrow.Array, err error) + AppendArray(arrow.Array) error + AppendIndices([]int, []bool) + ResetFull() +} + +type dictionaryBuilder struct { + builder + + dt *arrow.DictionaryType + deltaOffset int + memoTable hashing.MemoTable + idxBuilder indexBuilder +} + +// NewDictionaryBuilderWithDict initializes a dictionary builder and inserts the values from `init` as the first +// values in the dictionary, but does not insert them as values into the array. +func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder { + if init != nil && !arrow.TypeEqual(dt.ValueType, init.DataType()) { + panic(fmt.Errorf("arrow/array: cannot initialize dictionary type %T with array of type %T", dt.ValueType, init.DataType())) + } + + idxbldr, err := createIndexBuilder(mem, dt.IndexType.(arrow.FixedWidthDataType)) + if err != nil { + panic(fmt.Errorf("arrow/array: unsupported builder for index type of %T", dt)) + } + + memo, err := createMemoTable(mem, dt.ValueType) + if err != nil { + panic(fmt.Errorf("arrow/array: unsupported builder for value type of %T", dt)) + } + + bldr := dictionaryBuilder{ + builder: builder{refCount: 1, mem: mem}, + idxBuilder: idxbldr, + memoTable: memo, + dt: dt, + } + + switch dt.ValueType.ID() { + case arrow.NULL: + ret := &NullDictionaryBuilder{bldr} + debug.Assert(init == nil, "arrow/array: doesn't make sense to init a null dictionary") + return ret + case arrow.UINT8: + ret := &Uint8DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Uint8)); err != nil { + panic(err) + } + } + return ret + case arrow.INT8: + ret := &Int8DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Int8)); err != nil { + panic(err) + } + } + return ret + case arrow.UINT16: + ret := &Uint16DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Uint16)); err != nil { + panic(err) + } + } + return ret + case arrow.INT16: + ret := &Int16DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Int16)); err != nil { + panic(err) + } + } + return ret + case arrow.UINT32: + ret := &Uint32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Uint32)); err != nil { + panic(err) + } + } + return ret + case arrow.INT32: + ret := &Int32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Int32)); err != nil { + panic(err) + } + } + return ret + case arrow.UINT64: + ret := &Uint64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Uint64)); err != nil { + 
panic(err) + } + } + return ret + case arrow.INT64: + ret := &Int64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Int64)); err != nil { + panic(err) + } + } + return ret + case arrow.FLOAT16: + ret := &Float16DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Float16)); err != nil { + panic(err) + } + } + return ret + case arrow.FLOAT32: + ret := &Float32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Float32)); err != nil { + panic(err) + } + } + return ret + case arrow.FLOAT64: + ret := &Float64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Float64)); err != nil { + panic(err) + } + } + return ret + case arrow.STRING: + ret := &BinaryDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertStringDictValues(init.(*String)); err != nil { + panic(err) + } + } + return ret + case arrow.BINARY: + ret := &BinaryDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Binary)); err != nil { + panic(err) + } + } + return ret + case arrow.FIXED_SIZE_BINARY: + ret := &FixedSizeBinaryDictionaryBuilder{ + bldr, dt.ValueType.(*arrow.FixedSizeBinaryType).ByteWidth, + } + if init != nil { + if err = ret.InsertDictValues(init.(*FixedSizeBinary)); err != nil { + panic(err) + } + } + return ret + case arrow.DATE32: + ret := &Date32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Date32)); err != nil { + panic(err) + } + } + return ret + case arrow.DATE64: + ret := &Date64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Date64)); err != nil { + panic(err) + } + } + return ret + case arrow.TIMESTAMP: + ret := &TimestampDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Timestamp)); err != nil { + panic(err) + } + } + return ret + case arrow.TIME32: + ret := &Time32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Time32)); err != nil { + panic(err) + } + } + return ret + case arrow.TIME64: + ret := &Time64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Time64)); err != nil { + panic(err) + } + } + return ret + case arrow.INTERVAL_MONTHS: + ret := &MonthIntervalDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*MonthInterval)); err != nil { + panic(err) + } + } + return ret + case arrow.INTERVAL_DAY_TIME: + ret := &DayTimeDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*DayTimeInterval)); err != nil { + panic(err) + } + } + return ret + case arrow.DECIMAL128: + ret := &Decimal128DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Decimal128)); err != nil { + panic(err) + } + } + return ret + case arrow.DECIMAL256: + ret := &Decimal256DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Decimal256)); err != nil { + panic(err) + } + } + return ret + case arrow.LIST: + case arrow.STRUCT: + case arrow.SPARSE_UNION: + case arrow.DENSE_UNION: + case arrow.DICTIONARY: + case arrow.MAP: + case arrow.EXTENSION: + case arrow.FIXED_SIZE_LIST: + case arrow.DURATION: + ret := &DurationDictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Duration)); err != nil { + panic(err) + } + } + return ret + case arrow.LARGE_STRING: + case arrow.LARGE_BINARY: + case arrow.LARGE_LIST: + case arrow.INTERVAL_MONTH_DAY_NANO: + ret := 
&MonthDayNanoDictionaryBuilder{bldr}
+	if init != nil {
+		if err = ret.InsertDictValues(init.(*MonthDayNanoInterval)); err != nil {
+			panic(err)
+		}
+	}
+	return ret
+	}
+
+	panic("arrow/array: unimplemented dictionary key type")
+}
+
+func NewDictionaryBuilder(mem memory.Allocator, dt *arrow.DictionaryType) DictionaryBuilder {
+	return NewDictionaryBuilderWithDict(mem, dt, nil)
+}
+
+func (b *dictionaryBuilder) Type() arrow.DataType { return b.dt }
+
+func (b *dictionaryBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		b.idxBuilder.Release()
+		b.idxBuilder.Builder = nil
+		if binmemo, ok := b.memoTable.(*hashing.BinaryMemoTable); ok {
+			binmemo.Release()
+		}
+		b.memoTable = nil
+	}
+}
+
+func (b *dictionaryBuilder) AppendNull() {
+	b.length += 1
+	b.nulls += 1
+	b.idxBuilder.AppendNull()
+}
+
+func (b *dictionaryBuilder) AppendEmptyValue() {
+	b.length += 1
+	b.idxBuilder.AppendEmptyValue()
+}
+
+func (b *dictionaryBuilder) Reserve(n int) {
+	b.idxBuilder.Reserve(n)
+}
+
+func (b *dictionaryBuilder) Resize(n int) {
+	b.idxBuilder.Resize(n)
+	b.length = b.idxBuilder.Len()
+}
+
+func (b *dictionaryBuilder) ResetFull() {
+	b.builder.reset()
+	b.idxBuilder.NewArray().Release()
+	b.memoTable.Reset()
+}
+
+func (b *dictionaryBuilder) Cap() int { return b.idxBuilder.Cap() }
+
+// UnmarshalJSON unmarshals a JSON array of values into the builder, delegating
+// to Unmarshal; per-value unmarshalling via UnmarshalOne is not yet
+// implemented and will always error.
+func (b *dictionaryBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("dictionary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+func (b *dictionaryBuilder) Unmarshal(dec *json.Decoder) error {
+	bldr := NewBuilder(b.mem, b.dt.ValueType)
+	defer bldr.Release()
+
+	if err := bldr.Unmarshal(dec); err != nil {
+		return err
+	}
+
+	arr := bldr.NewArray()
+	defer arr.Release()
+	return b.AppendArray(arr)
+}
+
+func (b *dictionaryBuilder) AppendValueFromString(s string) error {
+	return fmt.Errorf("%w: AppendValueFromString to dictionary not yet implemented", arrow.ErrNotImplemented)
+}
+
+func (b *dictionaryBuilder) UnmarshalOne(dec *json.Decoder) error {
+	return errors.New("unmarshal json to dictionary not yet implemented")
+}
+
+func (b *dictionaryBuilder) NewArray() arrow.Array {
+	return b.NewDictionaryArray()
+}
+
+func (b *dictionaryBuilder) newData() *Data {
+	indices, dict, err := b.newWithDictOffset(0)
+	if err != nil {
+		panic(err)
+	}
+
+	indices.dtype = b.dt
+	indices.dictionary = dict
+	return indices
+}
+
+func (b *dictionaryBuilder) NewDictionaryArray() *Dictionary {
+	a := &Dictionary{}
+	a.refCount = 1
+
+	indices := b.newData()
+	a.setData(indices)
+	indices.Release()
+	return a
+}
+
+func (b *dictionaryBuilder) newWithDictOffset(offset int) (indices, dict *Data, err error) {
+	idxarr := b.idxBuilder.NewArray()
+	defer idxarr.Release()
+
+	indices = idxarr.Data().(*Data)
+	indices.Retain()
+
+	b.deltaOffset = b.memoTable.Size()
+	dict, err = GetDictArrayData(b.mem, b.dt.ValueType, b.memoTable, offset)
+	b.reset()
+	return
+}
+
+// NewDelta returns the dictionary indices and a delta dictionary since the
+// last time NewArray or NewDictionaryArray were called, and resets the state
+// of the builder (except for the dictionary / memotable)
+func (b *dictionaryBuilder) NewDelta() (indices, delta arrow.Array, err error)
{ + indicesData, deltaData, err := b.newWithDictOffset(b.deltaOffset) + if err != nil { + return nil, nil, err + } + + defer indicesData.Release() + defer deltaData.Release() + indices, delta = MakeFromData(indicesData), MakeFromData(deltaData) + return +} + +func (b *dictionaryBuilder) insertDictValue(val interface{}) error { + _, _, err := b.memoTable.GetOrInsert(val) + return err +} + +func (b *dictionaryBuilder) appendValue(val interface{}) error { + idx, _, err := b.memoTable.GetOrInsert(val) + b.idxBuilder.Append(idx) + b.length += 1 + return err +} + +func getvalFn(arr arrow.Array) func(i int) interface{} { + switch typedarr := arr.(type) { + case *Int8: + return func(i int) interface{} { return typedarr.Value(i) } + case *Uint8: + return func(i int) interface{} { return typedarr.Value(i) } + case *Int16: + return func(i int) interface{} { return typedarr.Value(i) } + case *Uint16: + return func(i int) interface{} { return typedarr.Value(i) } + case *Int32: + return func(i int) interface{} { return typedarr.Value(i) } + case *Uint32: + return func(i int) interface{} { return typedarr.Value(i) } + case *Int64: + return func(i int) interface{} { return typedarr.Value(i) } + case *Uint64: + return func(i int) interface{} { return typedarr.Value(i) } + case *Float16: + return func(i int) interface{} { return typedarr.Value(i).Uint16() } + case *Float32: + return func(i int) interface{} { return typedarr.Value(i) } + case *Float64: + return func(i int) interface{} { return typedarr.Value(i) } + case *Duration: + return func(i int) interface{} { return int64(typedarr.Value(i)) } + case *Timestamp: + return func(i int) interface{} { return int64(typedarr.Value(i)) } + case *Date64: + return func(i int) interface{} { return int64(typedarr.Value(i)) } + case *Time64: + return func(i int) interface{} { return int64(typedarr.Value(i)) } + case *Time32: + return func(i int) interface{} { return int32(typedarr.Value(i)) } + case *Date32: + return func(i int) interface{} { return int32(typedarr.Value(i)) } + case *MonthInterval: + return func(i int) interface{} { return int32(typedarr.Value(i)) } + case *Binary: + return func(i int) interface{} { return typedarr.Value(i) } + case *FixedSizeBinary: + return func(i int) interface{} { return typedarr.Value(i) } + case *String: + return func(i int) interface{} { return typedarr.Value(i) } + case *Decimal128: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&val)))[:] + } + case *DayTimeInterval: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.DayTimeIntervalSizeBytes]byte)(unsafe.Pointer(&val)))[:] + } + case *MonthDayNanoInterval: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.MonthDayNanoIntervalSizeBytes]byte)(unsafe.Pointer(&val)))[:] + } + } + + panic("arrow/array: invalid dictionary value type") +} + +func (b *dictionaryBuilder) AppendArray(arr arrow.Array) error { + debug.Assert(arrow.TypeEqual(b.dt.ValueType, arr.DataType()), "wrong value type of array to append to dict") + + valfn := getvalFn(arr) + for i := 0; i < arr.Len(); i++ { + if arr.IsNull(i) { + b.AppendNull() + } else { + if err := b.appendValue(valfn(i)); err != nil { + return err + } + } + } + return nil +} + +func (b *dictionaryBuilder) AppendIndices(indices []int, valid []bool) { + b.length += len(indices) + switch idxbldr := b.idxBuilder.Builder.(type) { + case *Int8Builder: + vals := make([]int8, len(indices)) + for i, v := range 
indices { + vals[i] = int8(v) + } + idxbldr.AppendValues(vals, valid) + case *Int16Builder: + vals := make([]int16, len(indices)) + for i, v := range indices { + vals[i] = int16(v) + } + idxbldr.AppendValues(vals, valid) + case *Int32Builder: + vals := make([]int32, len(indices)) + for i, v := range indices { + vals[i] = int32(v) + } + idxbldr.AppendValues(vals, valid) + case *Int64Builder: + vals := make([]int64, len(indices)) + for i, v := range indices { + vals[i] = int64(v) + } + idxbldr.AppendValues(vals, valid) + case *Uint8Builder: + vals := make([]uint8, len(indices)) + for i, v := range indices { + vals[i] = uint8(v) + } + idxbldr.AppendValues(vals, valid) + case *Uint16Builder: + vals := make([]uint16, len(indices)) + for i, v := range indices { + vals[i] = uint16(v) + } + idxbldr.AppendValues(vals, valid) + case *Uint32Builder: + vals := make([]uint32, len(indices)) + for i, v := range indices { + vals[i] = uint32(v) + } + idxbldr.AppendValues(vals, valid) + case *Uint64Builder: + vals := make([]uint64, len(indices)) + for i, v := range indices { + vals[i] = uint64(v) + } + idxbldr.AppendValues(vals, valid) + } +} + +type NullDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *NullDictionaryBuilder) NewArray() arrow.Array { + return b.NewDictionaryArray() +} + +func (b *NullDictionaryBuilder) NewDictionaryArray() *Dictionary { + idxarr := b.idxBuilder.NewArray() + defer idxarr.Release() + + out := idxarr.Data().(*Data) + dictarr := NewNull(0) + defer dictarr.Release() + + dictarr.data.Retain() + out.dtype = b.dt + out.dictionary = dictarr.data + + return NewDictionaryData(out) +} + +func (b *NullDictionaryBuilder) AppendArray(arr arrow.Array) error { + if arr.DataType().ID() != arrow.NULL { + return fmt.Errorf("cannot append non-null array to null dictionary") + } + + for i := 0; i < arr.(*Null).Len(); i++ { + b.AppendNull() + } + return nil +} + +type Int8DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Int8DictionaryBuilder) Append(v int8) error { return b.appendValue(v) } +func (b *Int8DictionaryBuilder) InsertDictValues(arr *Int8) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Uint8DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Uint8DictionaryBuilder) Append(v uint8) error { return b.appendValue(v) } +func (b *Uint8DictionaryBuilder) InsertDictValues(arr *Uint8) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Int16DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Int16DictionaryBuilder) Append(v int16) error { return b.appendValue(v) } +func (b *Int16DictionaryBuilder) InsertDictValues(arr *Int16) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Uint16DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Uint16DictionaryBuilder) Append(v uint16) error { return b.appendValue(v) } +func (b *Uint16DictionaryBuilder) InsertDictValues(arr *Uint16) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Int32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Int32DictionaryBuilder) Append(v int32) error { return b.appendValue(v) } +func (b *Int32DictionaryBuilder) InsertDictValues(arr *Int32) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + 
return +} + +type Uint32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Uint32DictionaryBuilder) Append(v uint32) error { return b.appendValue(v) } +func (b *Uint32DictionaryBuilder) InsertDictValues(arr *Uint32) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Int64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Int64DictionaryBuilder) Append(v int64) error { return b.appendValue(v) } +func (b *Int64DictionaryBuilder) InsertDictValues(arr *Int64) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Uint64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Uint64DictionaryBuilder) Append(v uint64) error { return b.appendValue(v) } +func (b *Uint64DictionaryBuilder) InsertDictValues(arr *Uint64) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type DurationDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *DurationDictionaryBuilder) Append(v arrow.Duration) error { return b.appendValue(int64(v)) } +func (b *DurationDictionaryBuilder) InsertDictValues(arr *Duration) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int64(v)); err != nil { + break + } + } + return +} + +type TimestampDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *TimestampDictionaryBuilder) Append(v arrow.Timestamp) error { return b.appendValue(int64(v)) } +func (b *TimestampDictionaryBuilder) InsertDictValues(arr *Timestamp) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int64(v)); err != nil { + break + } + } + return +} + +type Time32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Time32DictionaryBuilder) Append(v arrow.Time32) error { return b.appendValue(int32(v)) } +func (b *Time32DictionaryBuilder) InsertDictValues(arr *Time32) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int32(v)); err != nil { + break + } + } + return +} + +type Time64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Time64DictionaryBuilder) Append(v arrow.Time64) error { return b.appendValue(int64(v)) } +func (b *Time64DictionaryBuilder) InsertDictValues(arr *Time64) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int64(v)); err != nil { + break + } + } + return +} + +type Date32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Date32DictionaryBuilder) Append(v arrow.Date32) error { return b.appendValue(int32(v)) } +func (b *Date32DictionaryBuilder) InsertDictValues(arr *Date32) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int32(v)); err != nil { + break + } + } + return +} + +type Date64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Date64DictionaryBuilder) Append(v arrow.Date64) error { return b.appendValue(int64(v)) } +func (b *Date64DictionaryBuilder) InsertDictValues(arr *Date64) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int64(v)); err != nil { + break + } + } + return +} + +type MonthIntervalDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *MonthIntervalDictionaryBuilder) Append(v arrow.MonthInterval) error { + return b.appendValue(int32(v)) +} +func (b *MonthIntervalDictionaryBuilder) InsertDictValues(arr *MonthInterval) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(int32(v)); err != 
nil { + break + } + } + return +} + +type Float16DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Float16DictionaryBuilder) Append(v float16.Num) error { return b.appendValue(v.Uint16()) } +func (b *Float16DictionaryBuilder) InsertDictValues(arr *Float16) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v.Uint16()); err != nil { + break + } + } + return +} + +type Float32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Float32DictionaryBuilder) Append(v float32) error { return b.appendValue(v) } +func (b *Float32DictionaryBuilder) InsertDictValues(arr *Float32) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type Float64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Float64DictionaryBuilder) Append(v float64) error { return b.appendValue(v) } +func (b *Float64DictionaryBuilder) InsertDictValues(arr *Float64) (err error) { + for _, v := range arr.values { + if err = b.insertDictValue(v); err != nil { + break + } + } + return +} + +type BinaryDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *BinaryDictionaryBuilder) Append(v []byte) error { + if v == nil { + b.AppendNull() + return nil + } + return b.appendValue(v) +} +func (b *BinaryDictionaryBuilder) AppendString(v string) error { return b.appendValue(v) } +func (b *BinaryDictionaryBuilder) InsertDictValues(arr *Binary) (err error) { + if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) { + return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType) + } + + for i := 0; i < arr.Len(); i++ { + if err = b.insertDictValue(arr.Value(i)); err != nil { + break + } + } + return +} +func (b *BinaryDictionaryBuilder) InsertStringDictValues(arr *String) (err error) { + if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) { + return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType) + } + + for i := 0; i < arr.Len(); i++ { + if err = b.insertDictValue(arr.Value(i)); err != nil { + break + } + } + return +} + +type FixedSizeBinaryDictionaryBuilder struct { + dictionaryBuilder + byteWidth int +} + +func (b *FixedSizeBinaryDictionaryBuilder) Append(v []byte) error { + return b.appendValue(v[:b.byteWidth]) +} +func (b *FixedSizeBinaryDictionaryBuilder) InsertDictValues(arr *FixedSizeBinary) (err error) { + var ( + beg = arr.array.data.offset * b.byteWidth + end = (arr.array.data.offset + arr.data.length) * b.byteWidth + ) + data := arr.valueBytes[beg:end] + for len(data) > 0 { + if err = b.insertDictValue(data[:b.byteWidth]); err != nil { + break + } + data = data[b.byteWidth:] + } + return +} + +type Decimal128DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Decimal128DictionaryBuilder) Append(v decimal128.Num) error { + return b.appendValue((*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *Decimal128DictionaryBuilder) InsertDictValues(arr *Decimal128) (err error) { + data := arrow.Decimal128Traits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.Decimal128SizeBytes]); err != nil { + break + } + data = data[arrow.Decimal128SizeBytes:] + } + return +} + +type Decimal256DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Decimal256DictionaryBuilder) Append(v decimal128.Num) error { + return 
b.appendValue((*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *Decimal256DictionaryBuilder) InsertDictValues(arr *Decimal256) (err error) { + data := arrow.Decimal256Traits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.Decimal256SizeBytes]); err != nil { + break + } + data = data[arrow.Decimal256SizeBytes:] + } + return +} + +type MonthDayNanoDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *MonthDayNanoDictionaryBuilder) Append(v arrow.MonthDayNanoInterval) error { + return b.appendValue((*(*[arrow.MonthDayNanoIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *MonthDayNanoDictionaryBuilder) InsertDictValues(arr *MonthDayNanoInterval) (err error) { + data := arrow.MonthDayNanoIntervalTraits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.MonthDayNanoIntervalSizeBytes]); err != nil { + break + } + data = data[arrow.MonthDayNanoIntervalSizeBytes:] + } + return +} + +type DayTimeDictionaryBuilder struct { + dictionaryBuilder +} + +func (b *DayTimeDictionaryBuilder) Append(v arrow.DayTimeInterval) error { + return b.appendValue((*(*[arrow.DayTimeIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *DayTimeDictionaryBuilder) InsertDictValues(arr *DayTimeInterval) (err error) { + data := arrow.DayTimeIntervalTraits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.DayTimeIntervalSizeBytes]); err != nil { + break + } + data = data[arrow.DayTimeIntervalSizeBytes:] + } + return +} + +func IsTrivialTransposition(transposeMap []int32) bool { + for i, t := range transposeMap { + if t != int32(i) { + return false + } + } + return true +} + +func TransposeDictIndices(mem memory.Allocator, data arrow.ArrayData, inType, outType arrow.DataType, dict arrow.ArrayData, transposeMap []int32) (arrow.ArrayData, error) { + // inType may be different from data->dtype if data is ExtensionType + if inType.ID() != arrow.DICTIONARY || outType.ID() != arrow.DICTIONARY { + return nil, errors.New("arrow/array: expected dictionary type") + } + + var ( + inDictType = inType.(*arrow.DictionaryType) + outDictType = outType.(*arrow.DictionaryType) + inIndexType = inDictType.IndexType + outIndexType = outDictType.IndexType.(arrow.FixedWidthDataType) + ) + + if inIndexType.ID() == outIndexType.ID() && IsTrivialTransposition(transposeMap) { + // index type and values will be identical, we can reuse the existing buffers + return NewDataWithDictionary(outType, data.Len(), []*memory.Buffer{data.Buffers()[0], data.Buffers()[1]}, + data.NullN(), data.Offset(), dict.(*Data)), nil + } + + // default path: compute the transposed indices as a new buffer + outBuf := memory.NewResizableBuffer(mem) + outBuf.Resize(data.Len() * int(bitutil.BytesForBits(int64(outIndexType.BitWidth())))) + defer outBuf.Release() + + // shift null buffer if original offset is non-zero + var nullBitmap *memory.Buffer + if data.Offset() != 0 && data.NullN() != 0 { + nullBitmap = memory.NewResizableBuffer(mem) + nullBitmap.Resize(int(bitutil.BytesForBits(int64(data.Len())))) + bitutil.CopyBitmap(data.Buffers()[0].Bytes(), data.Offset(), data.Len(), nullBitmap.Bytes(), 0) + defer nullBitmap.Release() + } else { + nullBitmap = data.Buffers()[0] + } + + outData := NewDataWithDictionary(outType, data.Len(), + []*memory.Buffer{nullBitmap, outBuf}, data.NullN(), 0, dict.(*Data)) + err := utils.TransposeIntsBuffers(inIndexType, outIndexType, + data.Buffers()[1].Bytes(), outBuf.Bytes(), data.Offset(), 
outData.offset, data.Len(), transposeMap)
+	return outData, err
+}
+
+// DictionaryUnifier defines the interface used for unifying, and optionally producing
+// transposition maps for, multiple dictionary arrays incrementally.
+type DictionaryUnifier interface {
+	// Unify adds the provided array of dictionary values to be unified.
+	Unify(arrow.Array) error
+	// UnifyAndTranspose adds the provided array of dictionary values,
+	// just like Unify but returns an allocated buffer containing a mapping
+	// to transpose dictionary indices.
+	UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error)
+	// GetResult returns the dictionary type (choosing the smallest index type
+	// that can represent all the values) and the new unified dictionary.
+	//
+	// Calling GetResult clears the existing dictionary from the unifier so it
+	// can be reused by calling Unify/UnifyAndTranspose again with new arrays.
+	GetResult() (outType arrow.DataType, outDict arrow.Array, err error)
+	// GetResultWithIndexType is like GetResult, but allows specifying the type
+	// of the dictionary indexes rather than letting the unifier pick. If the
+	// passed-in index type isn't large enough to represent all of the dictionary
+	// values, an error will be returned instead. The new unified dictionary
+	// is returned.
+	GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error)
+	// Release should be called to clean up any allocated scratch memo-table used
+	// for building the unified dictionary.
+	Release()
+}
+
+type unifier struct {
+	mem       memory.Allocator
+	valueType arrow.DataType
+	memoTable hashing.MemoTable
+}
+
+// NewDictionaryUnifier constructs and returns a new dictionary unifier for dictionaries
+// of valueType, using the provided allocator for allocating the unified dictionary
+// and the memotable used for building it.
+//
+// This will only work for non-nested types currently. A nested valueType or dictionary type
+// will result in an error.
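+//
+// A minimal usage sketch (dictA and dictB are illustrative arrays of string
+// dictionary values obtained elsewhere):
+//
+//	uni, err := array.NewDictionaryUnifier(memory.DefaultAllocator, arrow.BinaryTypes.String)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer uni.Release()
+//	_ = uni.Unify(dictA)
+//	_ = uni.Unify(dictB)
+//	outType, outDict, err := uni.GetResult()
+//	if err == nil {
+//		defer outDict.Release()
+//	}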
+func NewDictionaryUnifier(alloc memory.Allocator, valueType arrow.DataType) (DictionaryUnifier, error) { + memoTable, err := createMemoTable(alloc, valueType) + if err != nil { + return nil, err + } + return &unifier{ + mem: alloc, + valueType: valueType, + memoTable: memoTable, + }, nil +} + +func (u *unifier) Release() { + if bin, ok := u.memoTable.(*hashing.BinaryMemoTable); ok { + bin.Release() + } +} + +func (u *unifier) Unify(dict arrow.Array) (err error) { + if !arrow.TypeEqual(u.valueType, dict.DataType()) { + return fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), u.valueType) + } + + valFn := getvalFn(dict) + for i := 0; i < dict.Len(); i++ { + if dict.IsNull(i) { + u.memoTable.GetOrInsertNull() + continue + } + + if _, _, err = u.memoTable.GetOrInsert(valFn(i)); err != nil { + return err + } + } + return +} + +func (u *unifier) UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error) { + if !arrow.TypeEqual(u.valueType, dict.DataType()) { + return nil, fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), u.valueType) + } + + transposed = memory.NewResizableBuffer(u.mem) + transposed.Resize(arrow.Int32Traits.BytesRequired(dict.Len())) + + newIdxes := arrow.Int32Traits.CastFromBytes(transposed.Bytes()) + valFn := getvalFn(dict) + for i := 0; i < dict.Len(); i++ { + if dict.IsNull(i) { + idx, _ := u.memoTable.GetOrInsertNull() + newIdxes[i] = int32(idx) + continue + } + + idx, _, err := u.memoTable.GetOrInsert(valFn(i)) + if err != nil { + transposed.Release() + return nil, err + } + newIdxes[i] = int32(idx) + } + return +} + +func (u *unifier) GetResult() (outType arrow.DataType, outDict arrow.Array, err error) { + dictLen := u.memoTable.Size() + var indexType arrow.DataType + switch { + case dictLen <= math.MaxInt8: + indexType = arrow.PrimitiveTypes.Int8 + case dictLen <= math.MaxInt16: + indexType = arrow.PrimitiveTypes.Int16 + case dictLen <= math.MaxInt32: + indexType = arrow.PrimitiveTypes.Int32 + default: + indexType = arrow.PrimitiveTypes.Int64 + } + outType = &arrow.DictionaryType{IndexType: indexType, ValueType: u.valueType} + + dictData, err := GetDictArrayData(u.mem, u.valueType, u.memoTable, 0) + if err != nil { + return nil, nil, err + } + + u.memoTable.Reset() + + defer dictData.Release() + outDict = MakeFromData(dictData) + return +} + +func (u *unifier) GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error) { + dictLen := u.memoTable.Size() + var toobig bool + switch indexType.ID() { + case arrow.UINT8: + toobig = dictLen > math.MaxUint8 + case arrow.INT8: + toobig = dictLen > math.MaxInt8 + case arrow.UINT16: + toobig = dictLen > math.MaxUint16 + case arrow.INT16: + toobig = dictLen > math.MaxInt16 + case arrow.UINT32: + toobig = uint(dictLen) > math.MaxUint32 + case arrow.INT32: + toobig = dictLen > math.MaxInt32 + case arrow.UINT64: + toobig = uint64(dictLen) > uint64(math.MaxUint64) + case arrow.INT64: + default: + return nil, fmt.Errorf("arrow/array: invalid dictionary index type: %s, must be integral", indexType) + } + if toobig { + return nil, errors.New("arrow/array: cannot combine dictionaries. 
unified dictionary requires a larger index type") + } + + dictData, err := GetDictArrayData(u.mem, u.valueType, u.memoTable, 0) + if err != nil { + return nil, err + } + + u.memoTable.Reset() + + defer dictData.Release() + return MakeFromData(dictData), nil +} + +func unifyRecursive(mem memory.Allocator, typ arrow.DataType, chunks []*Data) (changed bool, err error) { + debug.Assert(len(chunks) != 0, "must provide non-zero length chunk slice") + var extType arrow.DataType + + if typ.ID() == arrow.EXTENSION { + extType = typ + typ = typ.(arrow.ExtensionType).StorageType() + } + + if nestedTyp, ok := typ.(arrow.NestedType); ok { + children := make([]*Data, len(chunks)) + for i, f := range nestedTyp.Fields() { + for j, c := range chunks { + children[j] = c.childData[i].(*Data) + } + + childChanged, err := unifyRecursive(mem, f.Type, children) + if err != nil { + return false, err + } + if childChanged { + // only when unification actually occurs + for j := range chunks { + chunks[j].childData[i] = children[j] + } + changed = true + } + } + } + + if typ.ID() == arrow.DICTIONARY { + dictType := typ.(*arrow.DictionaryType) + var ( + uni DictionaryUnifier + newDict arrow.Array + ) + // unify any nested dictionaries first, but the unifier doesn't support + // nested dictionaries yet so this would fail. + uni, err = NewDictionaryUnifier(mem, dictType.ValueType) + if err != nil { + return changed, err + } + defer uni.Release() + transposeMaps := make([]*memory.Buffer, len(chunks)) + for i, c := range chunks { + debug.Assert(c.dictionary != nil, "missing dictionary data for dictionary array") + arr := MakeFromData(c.dictionary) + defer arr.Release() + if transposeMaps[i], err = uni.UnifyAndTranspose(arr); err != nil { + return + } + defer transposeMaps[i].Release() + } + + if newDict, err = uni.GetResultWithIndexType(dictType.IndexType); err != nil { + return + } + defer newDict.Release() + + for j := range chunks { + chnk, err := TransposeDictIndices(mem, chunks[j], typ, typ, newDict.Data(), arrow.Int32Traits.CastFromBytes(transposeMaps[j].Bytes())) + if err != nil { + return changed, err + } + chunks[j].Release() + chunks[j] = chnk.(*Data) + if extType != nil { + chunks[j].dtype = extType + } + } + changed = true + } + + return +} + +// UnifyChunkedDicts takes a chunked array of dictionary type and will unify +// the dictionary across all of the chunks with the returned chunked array +// having all chunks share the same dictionary. +// +// The return from this *must* have Release called on it unless an error is returned +// in which case the *arrow.Chunked will be nil. +// +// If there is 1 or fewer chunks, then nothing is modified and this function will just +// call Retain on the passed in Chunked array (so Release can safely be called on it). +// The same is true if the type of the array is not a dictionary or if no changes are +// needed for all of the chunks to be using the same dictionary. 
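+//
+// A call sketch under those rules (chunked is an illustrative dictionary-typed
+// *arrow.Chunked obtained elsewhere):
+//
+//	unified, err := array.UnifyChunkedDicts(memory.DefaultAllocator, chunked)
+//	if err != nil {
+//		return err // unified is nil on error
+//	}
+//	defer unified.Release()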
+func UnifyChunkedDicts(alloc memory.Allocator, chnkd *arrow.Chunked) (*arrow.Chunked, error) { + if len(chnkd.Chunks()) <= 1 { + chnkd.Retain() + return chnkd, nil + } + + chunksData := make([]*Data, len(chnkd.Chunks())) + for i, c := range chnkd.Chunks() { + c.Data().Retain() + chunksData[i] = c.Data().(*Data) + } + changed, err := unifyRecursive(alloc, chnkd.DataType(), chunksData) + if err != nil || !changed { + for _, c := range chunksData { + c.Release() + } + if err == nil { + chnkd.Retain() + } else { + chnkd = nil + } + return chnkd, err + } + + chunks := make([]arrow.Array, len(chunksData)) + for i, c := range chunksData { + chunks[i] = MakeFromData(c) + defer chunks[i].Release() + c.Release() + } + + return arrow.NewChunked(chnkd.DataType(), chunks), nil +} + +// UnifyTableDicts performs UnifyChunkedDicts on each column of the table so that +// any dictionary column will have the dictionaries of its chunks unified. +// +// The returned Table should always be Release'd unless a non-nil error was returned, +// in which case the table returned will be nil. +func UnifyTableDicts(alloc memory.Allocator, table arrow.Table) (arrow.Table, error) { + cols := make([]arrow.Column, table.NumCols()) + for i := 0; i < int(table.NumCols()); i++ { + chnkd, err := UnifyChunkedDicts(alloc, table.Column(i).Data()) + if err != nil { + return nil, err + } + defer chnkd.Release() + cols[i] = *arrow.NewColumn(table.Schema().Field(i), chnkd) + defer cols[i].Release() + } + return NewTable(table.Schema(), cols, table.NumRows()), nil +} + +var ( + _ arrow.Array = (*Dictionary)(nil) + _ Builder = (*dictionaryBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go new file mode 100644 index 00000000..32030173 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go @@ -0,0 +1,315 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/v12/arrow" +) + +// Edit represents one entry in the edit script to compare two arrays. +type Edit struct { + Insert bool + RunLength int64 +} + +// Edits is a slice of Edit structs that represents an edit script to compare two arrays. +// When applied to the base array, it produces the target array. +// Each element of "insert" determines whether an element was inserted into (true) +// or deleted from (false) base. Each insertion or deletion is followed by a run of +// elements which are unchanged from base to target; the length of this run is stored +// in RunLength. (Note that the edit script begins and ends with a run of shared +// elements but both fields of the struct must have the same length. 
To accommodate this +// the first element of "insert" should be ignored.) +// +// For example for base "hlloo" and target "hello", the edit script would be +// [ +// +// {"insert": false, "run_length": 1}, // leading run of length 1 ("h") +// {"insert": true, "run_length": 3}, // insert("e") then a run of length 3 ("llo") +// {"insert": false, "run_length": 0} // delete("o") then an empty run +// +// ] +type Edits []Edit + +// String returns a simple string representation of the edit script. +func (e Edits) String() string { + return fmt.Sprintf("%v", []Edit(e)) +} + +// UnifiedDiff returns a string representation of the diff of base and target in Unified Diff format. +func (e Edits) UnifiedDiff(base, target arrow.Array) string { + var s strings.Builder + baseIndex := int64(0) + targetIndex := int64(0) + wrotePosition := false + for i := 0; i < len(e); i++ { + if i > 0 { + if !wrotePosition { + s.WriteString(fmt.Sprintf("@@ -%d, +%d @@\n", baseIndex, targetIndex)) + wrotePosition = true + } + if e[i].Insert { + s.WriteString(fmt.Sprintf("+%v\n", stringAt(target, targetIndex))) + targetIndex++ + } else { + s.WriteString(fmt.Sprintf("-%v\n", stringAt(base, baseIndex))) + baseIndex++ + } + } + for j := int64(0); j < e[i].RunLength; j++ { + baseIndex++ + targetIndex++ + wrotePosition = false + } + } + return s.String() +} + +func stringAt(arr arrow.Array, i int64) string { + if arr.IsNull(int(i)) { + return "null" + } + dt := arr.DataType() + switch { + case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Float32): + return fmt.Sprintf("%f", arr.(*Float32).Value(int(i))) + case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Float64): + return fmt.Sprintf("%f", arr.(*Float64).Value(int(i))) + case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Date32): + return arr.(*Date32).Value(int(i)).FormattedString() + case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Date64): + return arr.(*Date64).Value(int(i)).FormattedString() + case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_s): + return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Second).String() + case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_ms): + return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Millisecond).String() + case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_us): + return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Microsecond).String() + case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_ns): + return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Nanosecond).String() + } + s := NewSlice(arr, i, i+1) + defer s.Release() + st, _ := s.MarshalJSON() + return strings.Trim(string(st[1:len(st)-1]), "\n") +} + +// Diff compares two arrays, returning an edit script which expresses the difference +// between them. The edit script can be applied to the base array to produce the target. +// 'base' is a baseline for comparison. +// 'target' is an array of identical type to base whose elements differ from base's. 
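+//
+// A brief usage sketch (base and target are illustrative like-typed arrays):
+//
+//	edits, err := array.Diff(base, target)
+//	if err == nil {
+//		fmt.Println(edits.UnifiedDiff(base, target))
+//	}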
+func Diff(base, target arrow.Array) (edits Edits, err error) { + if !arrow.TypeEqual(base.DataType(), target.DataType()) { + return nil, fmt.Errorf("%w: only taking the diff of like-typed arrays is supported", arrow.ErrNotImplemented) + } + switch base.DataType().ID() { + case arrow.EXTENSION: + return Diff(base.(ExtensionArray).Storage(), target.(ExtensionArray).Storage()) + case arrow.DICTIONARY: + return nil, fmt.Errorf("%w: diffing arrays of type %s is not implemented", arrow.ErrNotImplemented, base.DataType()) + case arrow.RUN_END_ENCODED: + return nil, fmt.Errorf("%w: diffing arrays of type %s is not implemented", arrow.ErrNotImplemented, base.DataType()) + } + d := newQuadraticSpaceMyersDiff(base, target) + return d.Diff() +} + +// editPoint represents an intermediate state in the comparison of two arrays +type editPoint struct { + base int + target int +} + +type quadraticSpaceMyersDiff struct { + base arrow.Array + target arrow.Array + finishIndex int + editCount int + endpointBase []int + insert []bool + baseBegin int + targetBegin int + baseEnd int + targetEnd int +} + +func newQuadraticSpaceMyersDiff(base, target arrow.Array) *quadraticSpaceMyersDiff { + d := &quadraticSpaceMyersDiff{ + base: base, + target: target, + finishIndex: -1, + editCount: 0, + endpointBase: []int{}, + insert: []bool{}, + baseBegin: 0, + targetBegin: 0, + baseEnd: base.Len(), + targetEnd: target.Len(), + } + d.endpointBase = []int{d.extendFrom(editPoint{d.baseBegin, d.targetBegin}).base} + if d.baseEnd-d.baseBegin == d.targetEnd-d.targetBegin && d.endpointBase[0] == d.baseEnd { + // trivial case: base == target + d.finishIndex = 0 + } + return d +} + +func (d *quadraticSpaceMyersDiff) valuesEqual(baseIndex, targetIndex int) bool { + baseNull := d.base.IsNull(baseIndex) + targetNull := d.target.IsNull(targetIndex) + if baseNull || targetNull { + return baseNull && targetNull + } + return SliceEqual(d.base, int64(baseIndex), int64(baseIndex+1), d.target, int64(targetIndex), int64(targetIndex+1)) +} + +// increment the position within base and target (the elements skipped in this way were +// present in both sequences) +func (d *quadraticSpaceMyersDiff) extendFrom(p editPoint) editPoint { + for p.base != d.baseEnd && p.target != d.targetEnd { + if !d.valuesEqual(p.base, p.target) { + break + } + p.base++ + p.target++ + } + return p +} + +// increment the position within base (the element pointed to was deleted) +// then extend maximally +func (d *quadraticSpaceMyersDiff) deleteOne(p editPoint) editPoint { + if p.base != d.baseEnd { + p.base++ + } + return d.extendFrom(p) +} + +// increment the position within target (the element pointed to was inserted) +// then extend maximally +func (d *quadraticSpaceMyersDiff) insertOne(p editPoint) editPoint { + if p.target != d.targetEnd { + p.target++ + } + return d.extendFrom(p) +} + +// beginning of a range for storing per-edit state in endpointBase and insert +func storageOffset(editCount int) int { + return editCount * (editCount + 1) / 2 +} + +// given edit_count and index, augment endpointBase[index] with the corresponding +// position in target (which is only implicitly represented in editCount, index) +func (d *quadraticSpaceMyersDiff) getEditPoint(editCount, index int) editPoint { + insertionsMinusDeletions := 2*(index-storageOffset(editCount)) - editCount + maximalBase := d.endpointBase[index] + maximalTarget := min(d.targetBegin+((maximalBase-d.baseBegin)+insertionsMinusDeletions), d.targetEnd) + return editPoint{maximalBase, maximalTarget} +} + +func (d 
*quadraticSpaceMyersDiff) Next() { + d.editCount++ + if len(d.endpointBase) < storageOffset(d.editCount+1) { + d.endpointBase = append(d.endpointBase, make([]int, storageOffset(d.editCount+1)-len(d.endpointBase))...) + } + if len(d.insert) < storageOffset(d.editCount+1) { + d.insert = append(d.insert, make([]bool, storageOffset(d.editCount+1)-len(d.insert))...) + } + previousOffset := storageOffset(d.editCount - 1) + currentOffset := storageOffset(d.editCount) + + // try deleting from base first + for i, iOut := 0, 0; i < d.editCount; i, iOut = i+1, iOut+1 { + previousEndpoint := d.getEditPoint(d.editCount-1, i+previousOffset) + d.endpointBase[iOut+currentOffset] = d.deleteOne(previousEndpoint).base + } + + // check if inserting from target could do better + for i, iOut := 0, 1; i < d.editCount; i, iOut = i+1, iOut+1 { + // retrieve the previously computed best endpoint for (editCount, iOut) + // for comparison with the best endpoint achievable with an insertion + endpointAfterDeletion := d.getEditPoint(d.editCount, iOut+currentOffset) + + previousEndpoint := d.getEditPoint(d.editCount-1, i+previousOffset) + endpointAfterInsertion := d.insertOne(previousEndpoint) + + if endpointAfterInsertion.base-endpointAfterDeletion.base >= 0 { + // insertion was more efficient; keep it and mark the insertion in insert + d.insert[iOut+currentOffset] = true + d.endpointBase[iOut+currentOffset] = endpointAfterInsertion.base + } + } + + finish := editPoint{d.baseEnd, d.targetEnd} + for iOut := 0; iOut < d.editCount+1; iOut++ { + if d.getEditPoint(d.editCount, iOut+currentOffset) == finish { + d.finishIndex = iOut + currentOffset + return + } + } +} + +func (d *quadraticSpaceMyersDiff) Done() bool { + return d.finishIndex != -1 +} + +func (d *quadraticSpaceMyersDiff) GetEdits() (Edits, error) { + if !d.Done() { + panic("GetEdits called but Done() = false") + } + + length := d.editCount + 1 + edits := make(Edits, length) + index := d.finishIndex + endpoint := d.getEditPoint(d.editCount, d.finishIndex) + + for i := d.editCount; i > 0; i-- { + insert := d.insert[index] + edits[i].Insert = insert + insertionsMinusDeletions := (endpoint.base - d.baseBegin) - (endpoint.target - d.targetBegin) + if insert { + insertionsMinusDeletions++ + } else { + insertionsMinusDeletions-- + } + index = (i-1-insertionsMinusDeletions)/2 + storageOffset(i-1) + + // endpoint of previous edit + previous := d.getEditPoint(i-1, index) + in := 0 + if insert { + in = 1 + } + edits[i].RunLength = int64(endpoint.base - previous.base - (1 - in)) + endpoint = previous + } + edits[0].Insert = false + edits[0].RunLength = int64(endpoint.base - d.baseBegin) + + return edits, nil +} + +func (d *quadraticSpaceMyersDiff) Diff() (edits Edits, err error) { + for !d.Done() { + d.Next() + } + return d.GetEdits() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/doc.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/doc.go new file mode 100644 index 00000000..5cf85408 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/doc.go @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package array provides implementations of various Arrow array types. +*/ +package array diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go new file mode 100644 index 00000000..3571a971 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go @@ -0,0 +1,470 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/encoded" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v12/internal/utils" + "github.com/goccy/go-json" +) + +// RunEndEncoded represents an array containing two children: +// an array of int32 values defining the ends of each run of values +// and an array of values +type RunEndEncoded struct { + array + + ends arrow.Array + values arrow.Array +} + +func NewRunEndEncodedArray(runEnds, values arrow.Array, logicalLength, offset int) *RunEndEncoded { + data := NewData(arrow.RunEndEncodedOf(runEnds.DataType(), values.DataType()), logicalLength, + []*memory.Buffer{nil}, []arrow.ArrayData{runEnds.Data(), values.Data()}, 0, offset) + defer data.Release() + return NewRunEndEncodedData(data) +} + +func NewRunEndEncodedData(data arrow.ArrayData) *RunEndEncoded { + r := &RunEndEncoded{} + r.refCount = 1 + r.setData(data.(*Data)) + return r +} + +func (r *RunEndEncoded) Values() arrow.Array { return r.values } +func (r *RunEndEncoded) RunEndsArr() arrow.Array { return r.ends } + +func (r *RunEndEncoded) Retain() { + r.array.Retain() + r.values.Retain() + r.ends.Retain() +} + +func (r *RunEndEncoded) Release() { + r.array.Release() + r.values.Release() + r.ends.Release() +} + +// LogicalValuesArray returns an array holding the values of each +// run, only over the range of run values inside the logical offset/length +// range of the parent array. 
+// +// Example +// +// For this array: +// RunEndEncoded: { Offset: 150, Length: 1500 } +// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] +// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] +// +// LogicalValuesArray will return the following array: +// [ "f", "g" ] +// +// This is because the offset of 150 tells it to skip the values until +// "f" which corresponds with the logical offset (the run from 10 - 1000), +// and stops after "g" because the length + offset goes to 1650 which is +// within the run from 1000 - 1750, corresponding to the "g" value. +// +// Note +// +// The return from this needs to be Released. +func (r *RunEndEncoded) LogicalValuesArray() arrow.Array { + physOffset := r.GetPhysicalOffset() + physLength := r.GetPhysicalLength() + data := NewSliceData(r.data.Children()[1], int64(physOffset), int64(physOffset+physLength)) + defer data.Release() + return MakeFromData(data) +} + +// LogicalRunEndsArray returns an array holding the logical indexes +// of each run end, only over the range of run end values relative +// to the logical offset/length range of the parent array. +// +// For arrays with an offset, this is not a slice of the existing +// internal run ends array. Instead a new array is created with run-ends +// that are adjusted so the new array can have an offset of 0. As a result +// this method can be expensive to call for an array with a non-zero offset. +// +// Example +// +// For this array: +// RunEndEncoded: { Offset: 150, Length: 1500 } +// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] +// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] +// +// LogicalRunEndsArray will return the following array: +// [ 850, 1500 ] +// +// This is because the offset of 150 tells us to skip all run-ends less +// than 150 (by finding the physical offset), and we adjust the run-ends +// accordingly (1000 - 150 = 850). The logical length of the array is 1500, +// so we know we don't want to go past the 1750 run end. Thus the last +// run-end is determined by doing: min(1750 - 150, 1500) = 1500. 
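+//
+// A call sketch (reeArr names an arbitrary *RunEndEncoded; mem is any
+// memory.Allocator, e.g. memory.DefaultAllocator):
+//
+//	ends := reeArr.LogicalRunEndsArray(mem)
+//	defer ends.Release()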
+// +// Note +// +// The return from this needs to be Released +func (r *RunEndEncoded) LogicalRunEndsArray(mem memory.Allocator) arrow.Array { + physOffset := r.GetPhysicalOffset() + physLength := r.GetPhysicalLength() + + if r.data.offset == 0 { + data := NewSliceData(r.data.childData[0], 0, int64(physLength)) + defer data.Release() + return MakeFromData(data) + } + + bldr := NewBuilder(mem, r.data.childData[0].DataType()) + defer bldr.Release() + bldr.Resize(physLength) + + switch e := r.ends.(type) { + case *Int16: + for _, v := range e.Int16Values()[physOffset : physOffset+physLength] { + v -= int16(r.data.offset) + v = int16(utils.MinInt(int(v), r.data.length)) + bldr.(*Int16Builder).Append(v) + } + case *Int32: + for _, v := range e.Int32Values()[physOffset : physOffset+physLength] { + v -= int32(r.data.offset) + v = int32(utils.MinInt(int(v), r.data.length)) + bldr.(*Int32Builder).Append(v) + } + case *Int64: + for _, v := range e.Int64Values()[physOffset : physOffset+physLength] { + v -= int64(r.data.offset) + v = int64(utils.MinInt(int(v), r.data.length)) + bldr.(*Int64Builder).Append(v) + } + } + + return bldr.NewArray() +} + +func (r *RunEndEncoded) setData(data *Data) { + if len(data.childData) != 2 { + panic(fmt.Errorf("%w: arrow/array: RLE array must have exactly 2 children", arrow.ErrInvalid)) + } + debug.Assert(data.dtype.ID() == arrow.RUN_END_ENCODED, "invalid type for RunLengthEncoded") + if !data.dtype.(*arrow.RunEndEncodedType).ValidRunEndsType(data.childData[0].DataType()) { + panic(fmt.Errorf("%w: arrow/array: run ends array must be int16, int32, or int64", arrow.ErrInvalid)) + } + if data.childData[0].NullN() > 0 { + panic(fmt.Errorf("%w: arrow/array: run ends array cannot contain nulls", arrow.ErrInvalid)) + } + + r.array.setData(data) + + r.ends = MakeFromData(r.data.childData[0]) + r.values = MakeFromData(r.data.childData[1]) +} + +func (r *RunEndEncoded) GetPhysicalOffset() int { + return encoded.FindPhysicalOffset(r.data) +} + +func (r *RunEndEncoded) GetPhysicalLength() int { + return encoded.GetPhysicalLength(r.data) +} + +func (r *RunEndEncoded) ValueStr(i int) string { + value := r.values.GetOneForMarshal(i) + if byts, ok := value.(json.RawMessage); ok { + value = string(byts) + } + return fmt.Sprintf("{%d -> %v}", + r.ends.GetOneForMarshal(i), + value) +} +func (r *RunEndEncoded) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := 0; i < r.ends.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + + value := r.values.GetOneForMarshal(i) + if byts, ok := value.(json.RawMessage); ok { + value = string(byts) + } + fmt.Fprintf(&buf, "{%d -> %v}", + r.ends.GetOneForMarshal(i), + value) + } + + buf.WriteByte(']') + return buf.String() +} + +func (r *RunEndEncoded) GetOneForMarshal(i int) interface{} { + physIndex := encoded.FindPhysicalIndex(r.data, i+r.data.offset) + return r.values.GetOneForMarshal(physIndex) +} + +func (r *RunEndEncoded) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + buf.WriteByte('[') + for i := 0; i < r.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(r.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayRunEndEncodedEqual(l, r *RunEndEncoded) bool { + // types were already checked before getting here, so we know + // the encoded types are equal + mr := encoded.NewMergedRuns([2]arrow.Array{l, r}) + for mr.Next() { + lIndex := mr.IndexIntoArray(0) + rIndex := mr.IndexIntoArray(1) + if 
!SliceEqual(l.values, lIndex, lIndex+1, r.values, rIndex, rIndex+1) { + return false + } + } + return true +} + +func arrayRunEndEncodedApproxEqual(l, r *RunEndEncoded, opt equalOption) bool { + // types were already checked before getting here, so we know + // the encoded types are equal + mr := encoded.NewMergedRuns([2]arrow.Array{l, r}) + for mr.Next() { + lIndex := mr.IndexIntoArray(0) + rIndex := mr.IndexIntoArray(1) + if !sliceApproxEqual(l.values, lIndex, lIndex+1, r.values, rIndex, rIndex+1, opt) { + return false + } + } + return true +} + +type RunEndEncodedBuilder struct { + builder + + dt arrow.DataType + runEnds Builder + values Builder + maxRunEnd uint64 + + lastUnmarshalled interface{} +} + +func NewRunEndEncodedBuilder(mem memory.Allocator, runEnds, encoded arrow.DataType) *RunEndEncodedBuilder { + dt := arrow.RunEndEncodedOf(runEnds, encoded) + if !dt.ValidRunEndsType(runEnds) { + panic("arrow/ree: invalid runEnds type for run length encoded array") + } + + var maxEnd uint64 + switch runEnds.ID() { + case arrow.INT16: + maxEnd = math.MaxInt16 + case arrow.INT32: + maxEnd = math.MaxInt32 + case arrow.INT64: + maxEnd = math.MaxInt64 + } + return &RunEndEncodedBuilder{ + builder: builder{refCount: 1, mem: mem}, + dt: dt, + runEnds: NewBuilder(mem, runEnds), + values: NewBuilder(mem, encoded), + maxRunEnd: maxEnd, + lastUnmarshalled: nil, + } +} + +func (b *RunEndEncodedBuilder) Type() arrow.DataType { + return b.dt +} + +func (b *RunEndEncodedBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + b.values.Release() + b.runEnds.Release() + } +} + +func (b *RunEndEncodedBuilder) addLength(n uint64) { + if uint64(b.length)+n > b.maxRunEnd { + panic(fmt.Errorf("%w: %s array length must fit be less than %d", arrow.ErrInvalid, b.dt, b.maxRunEnd)) + } + + b.length += int(n) +} + +func (b *RunEndEncodedBuilder) finishRun() { + b.lastUnmarshalled = nil + if b.length == 0 { + return + } + + switch bldr := b.runEnds.(type) { + case *Int16Builder: + bldr.Append(int16(b.length)) + case *Int32Builder: + bldr.Append(int32(b.length)) + case *Int64Builder: + bldr.Append(int64(b.length)) + } +} + +func (b *RunEndEncodedBuilder) ValueBuilder() Builder { return b.values } +func (b *RunEndEncodedBuilder) Append(n uint64) { + b.finishRun() + b.addLength(n) +} +func (b *RunEndEncodedBuilder) AppendRuns(runs []uint64) { + for _, r := range runs { + b.finishRun() + b.addLength(r) + } +} +func (b *RunEndEncodedBuilder) ContinueRun(n uint64) { + b.addLength(n) +} +func (b *RunEndEncodedBuilder) AppendNull() { + b.finishRun() + b.values.AppendNull() + b.addLength(1) +} + +func (b *RunEndEncodedBuilder) NullN() int { + return UnknownNullCount +} + +func (b *RunEndEncodedBuilder) AppendEmptyValue() { + b.AppendNull() +} + +func (b *RunEndEncodedBuilder) Reserve(n int) { + b.values.Reserve(n) + b.runEnds.Reserve(n) +} + +func (b *RunEndEncodedBuilder) Resize(n int) { + b.values.Resize(n) + b.runEnds.Resize(n) +} + +func (b *RunEndEncodedBuilder) NewRunEndEncodedArray() *RunEndEncoded { + data := b.newData() + defer data.Release() + return NewRunEndEncodedData(data) +} + +func (b *RunEndEncodedBuilder) NewArray() arrow.Array { + return b.NewRunEndEncodedArray() +} + +func (b *RunEndEncodedBuilder) newData() (data *Data) { + b.finishRun() + values := b.values.NewArray() + defer values.Release() + runEnds := b.runEnds.NewArray() + defer runEnds.Release() + + data = NewData( + b.dt, b.length, []*memory.Buffer{nil}, + 
[]arrow.ArrayData{runEnds.Data(), values.Data()}, 0, 0) + b.reset() + return +} + +func (b *RunEndEncodedBuilder) AppendValueFromString(s string) error { + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + +func (b *RunEndEncodedBuilder) UnmarshalOne(dec *json.Decoder) error { + var value interface{} + if err := dec.Decode(&value); err != nil { + return err + } + + // if we unmarshalled the same value as the previous one, we want to + // continue the run. However, there's an edge case. At the start of + // unmarshalling, lastUnmarshalled will be nil, but we might get + // nil as the first value we unmarshal. In that case we want to + // make sure we add a new run instead. We can detect that case by + // checking that the number of runEnds matches the number of values + // we have, which means no matter what we have to start a new run + if reflect.DeepEqual(value, b.lastUnmarshalled) && (value != nil || b.runEnds.Len() != b.values.Len()) { + b.ContinueRun(1) + return nil + } + + data, err := json.Marshal(value) + if err != nil { + return err + } + + b.Append(1) + b.lastUnmarshalled = value + return b.ValueBuilder().UnmarshalOne(json.NewDecoder(bytes.NewReader(data))) +} + +func (b *RunEndEncodedBuilder) Unmarshal(dec *json.Decoder) error { + b.finishRun() + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *RunEndEncodedBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("list builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*RunEndEncoded)(nil) + _ Builder = (*RunEndEncodedBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go new file mode 100644 index 00000000..38f2630f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go @@ -0,0 +1,261 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "reflect" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// ExtensionArray is the interface that needs to be implemented to handle +// user-defined extension type arrays. In order to ensure consistency and +// proper behavior, all ExtensionArray types must embed ExtensionArrayBase +// in order to meet the interface which provides the default implementation +// and handling for the array while allowing custom behavior to be built +// on top of it. 
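That completes the run-end-encoded pieces of this vendored file. As a usage sketch (not part of the diff), the builder above separates each run into a length, set by `Append`/`ContinueRun`, and a single value appended through `ValueBuilder()`; run ends are only flushed to the child array when the next run starts or the array is finalized. A minimal sketch against the v12 API shown above:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	// int32 run ends, string run values
	bldr := array.NewRunEndEncodedBuilder(memory.DefaultAllocator,
		arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String)
	defer bldr.Release()

	bldr.Append(3)                                         // start a run of logical length 3 ...
	bldr.ValueBuilder().(*array.StringBuilder).Append("a") // ... with exactly one value

	bldr.Append(1)
	bldr.ValueBuilder().(*array.StringBuilder).Append("b")
	bldr.ContinueRun(1) // extend the current run to length 2

	arr := bldr.NewRunEndEncodedArray()
	defer arr.Release()
	fmt.Println(arr) // runs print as {end -> value}, roughly: [{3 -> a},{5 -> b}]
}
```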
+type ExtensionArray interface { + arrow.Array + // ExtensionType returns the datatype as per calling DataType(), but + // already cast to ExtensionType + ExtensionType() arrow.ExtensionType + // Storage returns the underlying storage array for this array. + Storage() arrow.Array + // by having a non-exported function in the interface, it means that + // consumers must embed ExtensionArrayBase in their structs in order + // to fulfill this interface. + mustEmbedExtensionArrayBase() +} + +// two extension arrays are equal if their data types are equal and +// their underlying storage arrays are equal. +func arrayEqualExtension(l, r ExtensionArray) bool { + if !arrow.TypeEqual(l.DataType(), r.DataType()) { + return false + } + + return Equal(l.Storage(), r.Storage()) +} + +// two extension arrays are approximately equal if their data types are +// equal and their underlying storage arrays are approximately equal. +func arrayApproxEqualExtension(l, r ExtensionArray, opt equalOption) bool { + if !arrow.TypeEqual(l.DataType(), r.DataType()) { + return false + } + + return arrayApproxEqual(l.Storage(), r.Storage(), opt) +} + +// NewExtensionArrayWithStorage constructs a new ExtensionArray from the provided +// ExtensionType and uses the provided storage interface as the underlying storage. +// This will not release the storage array passed in so consumers should call Release +// on it manually while the new Extension array will share references to the underlying +// Data buffers. +func NewExtensionArrayWithStorage(dt arrow.ExtensionType, storage arrow.Array) arrow.Array { + if !arrow.TypeEqual(dt.StorageType(), storage.DataType()) { + panic(fmt.Errorf("arrow/array: storage type %s for extension type %s, does not match expected type %s", storage.DataType(), dt.ExtensionName(), dt.StorageType())) + } + + base := ExtensionArrayBase{} + base.refCount = 1 + base.storage = storage + storage.Retain() + + storageData := storage.Data().(*Data) + // create a new data instance with the ExtensionType as the datatype but referencing the + // same underlying buffers to share them with the storage array. + baseData := NewData(dt, storageData.length, storageData.buffers, storageData.childData, storageData.nulls, storageData.offset) + defer baseData.Release() + base.array.setData(baseData) + + // use the ExtensionType's ArrayType to construct the correctly typed object + // to use as the ExtensionArray interface. reflect.New returns a pointer to + // the newly created object. + arr := reflect.New(base.ExtensionType().ArrayType()) + // set the embedded ExtensionArrayBase to the value we created above. We know + // that this field will exist because the interface requires embedding ExtensionArrayBase + // so we don't have to separately check, this will panic if called on an ArrayType + // that doesn't embed ExtensionArrayBase which is what we want. + arr.Elem().FieldByName("ExtensionArrayBase").Set(reflect.ValueOf(base)) + return arr.Interface().(ExtensionArray) +} + +// NewExtensionData expects a data with a datatype of arrow.ExtensionType and +// underlying data built for the storage array. +func NewExtensionData(data arrow.ArrayData) ExtensionArray { + base := ExtensionArrayBase{} + base.refCount = 1 + base.setData(data.(*Data)) + + // use the ExtensionType's ArrayType to construct the correctly typed object + // to use as the ExtensionArray interface. reflect.New returns a pointer to + // the newly created object. 
+ arr := reflect.New(base.ExtensionType().ArrayType()) + // set the embedded ExtensionArrayBase to the value we created above. We know + // that this field will exist because the interface requires embedding ExtensionArrayBase + // so we don't have to separately check, this will panic if called on an ArrayType + // that doesn't embed ExtensionArrayBase which is what we want. + arr.Elem().FieldByName("ExtensionArrayBase").Set(reflect.ValueOf(base)) + return arr.Interface().(ExtensionArray) +} + +// ExtensionArrayBase is the base struct for user-defined Extension Array types +// and must be embedded in any user-defined extension arrays like so: +// +// type UserDefinedArray struct { +// array.ExtensionArrayBase +// } +// +type ExtensionArrayBase struct { + array + storage arrow.Array +} + +func (e *ExtensionArrayBase) String() string { + return fmt.Sprintf("(%s)%s", e.data.dtype, e.storage) +} + +func (e *ExtensionArrayBase) GetOneForMarshal(i int) interface{} { + return e.storage.GetOneForMarshal(i) +} + +func (e *ExtensionArrayBase) MarshalJSON() ([]byte, error) { + return json.Marshal(e.storage) +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (e *ExtensionArrayBase) Retain() { + e.array.Retain() + e.storage.Retain() +} + +// Release decreases the reference count by 1. +// Release may be called simultaneously from multiple goroutines. +// When the reference count goes to zero, the memory is freed. +func (e *ExtensionArrayBase) Release() { + e.array.Release() + e.storage.Release() +} + +// Storage returns the underlying storage array +func (e *ExtensionArrayBase) Storage() arrow.Array { return e.storage } + +// ExtensionType returns the same thing as DataType, just already casted +// to an ExtensionType interface for convenience. +func (e *ExtensionArrayBase) ExtensionType() arrow.ExtensionType { + return e.DataType().(arrow.ExtensionType) +} + +func (e *ExtensionArrayBase) setData(data *Data) { + if data.DataType().ID() != arrow.EXTENSION { + panic("arrow/array: must use extension type to construct an extension array") + } + extType, ok := data.dtype.(arrow.ExtensionType) + if !ok { + panic("arrow/array: DataType for ExtensionArray must implement arrow.ExtensionType") + } + + e.array.setData(data) + // our underlying storage needs to reference the same data buffers (no copying) + // but should have the storage type's datatype, so we create a Data for it. + storageData := NewData(extType.StorageType(), data.length, data.buffers, data.childData, data.nulls, data.offset) + storageData.SetDictionary(data.dictionary) + defer storageData.Release() + e.storage = MakeFromData(storageData) +} + +// ValueStr returns the value at index i as a string. +// This needs to be implemented by the extension array type. +func (e *ExtensionArrayBase) ValueStr(i int) string { + panic("arrow/array: ValueStr wasn't implemented by this extension array type") +} + +// no-op function that exists simply to force embedding this in any extension array types. +func (ExtensionArrayBase) mustEmbedExtensionArrayBase() {} + +// ExtensionBuilder is a convenience builder so that NewBuilder and such will still work +// with extension types properly. Depending on preference it may be cleaner or easier to just use +// NewExtensionArrayWithStorage and pass a storage array. +// +// That said, this allows easily building an extension array by providing the extension +// type and retrieving the storage builder. 
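The doc comment above suggests the `NewExtensionArrayWithStorage` path when no custom builder is needed. A sketch of that path follows; it assumes a "uuid" extension type backed by `FixedSizeBinary(16)` has been registered elsewhere via `arrow.RegisterExtensionType`, which is not shown in this diff:

```go
package main

import (
	"bytes"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	// assumed: a "uuid" extension type was registered at init time
	uuidType := arrow.GetExtensionType("uuid")

	// build the storage array (FixedSizeBinary(16)) as usual
	sb := array.NewFixedSizeBinaryBuilder(memory.DefaultAllocator,
		&arrow.FixedSizeBinaryType{ByteWidth: 16})
	defer sb.Release()
	sb.Append(bytes.Repeat([]byte{0xAB}, 16))

	storage := sb.NewFixedSizeBinaryArray()
	defer storage.Release() // the extension array holds its own reference

	// wrap it; the underlying buffers are shared, not copied
	ext := array.NewExtensionArrayWithStorage(uuidType, storage)
	defer ext.Release()
}
```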
+type ExtensionBuilder struct {
+	Builder
+	dt arrow.ExtensionType
+}
+
+// NewExtensionBuilder returns a builder using the provided memory allocator for the desired
+// extension type. It will internally construct a builder of the storage type for the extension
+// type and keep a copy of the extension type. The underlying type builder can then be retrieved
+// by calling `StorageBuilder` on this and then type asserting it to the desired builder type.
+//
+// After using the storage builder, calling NewArray or NewExtensionArray will construct
+// the appropriate extension array type and set the storage correctly, resetting the builder for
+// reuse.
+//
+// Example
+//
+// Simple example assuming an extension type of a UUID defined as a FixedSizeBinary(16) was registered
+// using the type name "uuid":
+//
+// uuidType := arrow.GetExtensionType("uuid")
+// bldr := array.NewExtensionBuilder(memory.DefaultAllocator, uuidType)
+// defer bldr.Release()
+// uuidBldr := bldr.StorageBuilder().(*array.FixedSizeBinaryBuilder)
+// /* build up the fixed size binary array as usual via Append/AppendValues */
+// uuidArr := bldr.NewExtensionArray()
+// defer uuidArr.Release()
+//
+// Because the storage builder is embedded in the Extension builder it also means
+// that any of the functions available on the Builder interface can be called on
+// an instance of ExtensionBuilder and will respond appropriately as the storage
+// builder would for generically grabbing the Length, Cap, Nulls, reserving, etc.
+func NewExtensionBuilder(mem memory.Allocator, dt arrow.ExtensionType) *ExtensionBuilder {
+	return &ExtensionBuilder{Builder: NewBuilder(mem, dt.StorageType()), dt: dt}
+}
+
+func (b *ExtensionBuilder) Type() arrow.DataType { return b.dt }
+
+// StorageBuilder returns the builder for the underlying storage type.
+func (b *ExtensionBuilder) StorageBuilder() Builder { return b.Builder }
+
+// NewArray creates a new array from the memory buffers used by the builder
+// and resets the builder so it can be used to build a new array.
+func (b *ExtensionBuilder) NewArray() arrow.Array {
+	return b.NewExtensionArray()
+}
+
+// NewExtensionArray creates an Extension array from the memory buffers used
+// by the builder and resets the ExtensionBuilder so it can be used to build
+// a new ExtensionArray of the same type.
+func (b *ExtensionBuilder) NewExtensionArray() ExtensionArray {
+	storage := b.Builder.NewArray()
+	defer storage.Release()
+
+	storage.Data().(*Data).dtype = b.dt
+	return NewExtensionData(storage.Data())
+}
+
+var (
+	_ arrow.Array = (ExtensionArray)(nil)
+	_ Builder     = (*ExtensionBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/extension_builder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/extension_builder.go
new file mode 100644
index 00000000..a71287fa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/extension_builder.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+// ExtensionBuilderWrapper is an interface that you need to implement in your custom extension type if you want to provide a custom builder as well.
+// See example in ./arrow/internal/testing/types/extension_types.go
+type ExtensionBuilderWrapper interface {
+	NewBuilder(bldr *ExtensionBuilder) Builder
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go
new file mode 100644
index 00000000..c0c8676c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go
@@ -0,0 +1,349 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/bitutil"
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/goccy/go-json"
+)
+
+// FixedSizeList represents an immutable sequence of N array values.
+type FixedSizeList struct {
+	array
+	n      int32
+	values arrow.Array
+}
+
+// NewFixedSizeListData returns a new FixedSizeList array value, from data.
+func NewFixedSizeListData(data arrow.ArrayData) *FixedSizeList { + a := &FixedSizeList{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *FixedSizeList) ListValues() arrow.Array { return a.values } + +func (a *FixedSizeList) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} +func (a *FixedSizeList) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if !a.IsValid(i) { + o.WriteString("(null)") + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *FixedSizeList) newListValue(i int) arrow.Array { + n := int64(a.n) + off := int64(a.array.data.offset) + beg := (off + int64(i)) * n + end := (off + int64(i+1)) * n + sli := NewSlice(a.values, beg, end) + return sli +} + +func (a *FixedSizeList) setData(data *Data) { + a.array.setData(data) + a.n = a.DataType().(*arrow.FixedSizeListType).Len() + a.values = MakeFromData(data.childData[0]) +} + +func arrayEqualFixedSizeList(left, right *FixedSizeList) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *FixedSizeList) Len() int { return a.array.Len() } + +func (a *FixedSizeList) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *FixedSizeList) Release() { + a.array.Release() + a.values.Release() +} + +func (a *FixedSizeList) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + + return json.RawMessage(v) +} + +func (a *FixedSizeList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if a.IsNull(i) { + enc.Encode(nil) + continue + } + + slice := a.newListValue(i) + if err := enc.Encode(slice); err != nil { + return nil, err + } + slice.Release() + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +type FixedSizeListBuilder struct { + builder + + etype arrow.DataType // data type of the list's elements. + n int32 // number of elements in the fixed-size list. + values Builder // value builder for the list's elements. +} + +// NewFixedSizeListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewFixedSizeListBuilder(mem memory.Allocator, n int32, etype arrow.DataType) *FixedSizeListBuilder { + return &FixedSizeListBuilder{ + builder: builder{refCount: 1, mem: mem}, + etype: etype, + n: n, + values: NewBuilder(mem, etype), + } +} + +func (b *FixedSizeListBuilder) Type() arrow.DataType { return arrow.FixedSizeListOf(b.n, b.etype) } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *FixedSizeListBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *FixedSizeListBuilder) Append(v bool) { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(v) +} + +func (b *FixedSizeListBuilder) AppendNull() { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(false) +} + +func (b *FixedSizeListBuilder) AppendEmptyValue() { + b.Append(true) + for i := int32(0); i < b.n; i++ { + b.values.AppendEmptyValue() + } +} + +func (b *FixedSizeListBuilder) AppendValues(valid []bool) { + b.Reserve(len(valid)) + b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) +} + +func (b *FixedSizeListBuilder) unsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *FixedSizeListBuilder) init(capacity int) { + b.builder.init(capacity) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *FixedSizeListBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *FixedSizeListBuilder) Resize(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.builder.init) + } +} + +func (b *FixedSizeListBuilder) ValueBuilder() Builder { + return b.values +} + +// NewArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder +// so it can be used to build a new array. +func (b *FixedSizeListBuilder) NewArray() arrow.Array { + return b.NewListArray() +} + +// NewListArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder +// so it can be used to build a new array. 
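Before the constructor below, note the contract implied by newListValue earlier in this file: the child array is indexed purely by position (slot i spans child elements i*n through (i+1)*n), and in this vendored version AppendNull does not advance the value builder, so the caller must pad the children of a null slot (UnmarshalOne does exactly that for JSON input). A short sketch under those assumptions:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	fsb := array.NewFixedSizeListBuilder(memory.DefaultAllocator, 3, arrow.PrimitiveTypes.Int64)
	defer fsb.Release()
	vb := fsb.ValueBuilder().(*array.Int64Builder)

	fsb.Append(true)
	vb.AppendValues([]int64{1, 2, 3}, nil)

	fsb.AppendNull()
	for i := 0; i < 3; i++ {
		vb.AppendNull() // keep the child array aligned with the null slot
	}

	fsb.Append(true)
	vb.AppendValues([]int64{4, 5, 6}, nil)

	lst := fsb.NewListArray()
	defer lst.Release()
	fmt.Println(lst) // [[1 2 3] (null) [4 5 6]]
}
```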
+func (b *FixedSizeListBuilder) NewListArray() (a *FixedSizeList) { + data := b.newData() + a = NewFixedSizeListData(data) + data.Release() + return +} + +func (b *FixedSizeListBuilder) newData() (data *Data) { + values := b.values.NewArray() + defer values.Release() + + data = NewData( + arrow.FixedSizeListOf(b.n, b.etype), b.length, + []*memory.Buffer{b.nullBitmap}, + []arrow.ArrayData{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + + +func (b *FixedSizeListBuilder) AppendValueFromString(s string) error { + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + +func (b *FixedSizeListBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + b.Append(true) + if err := b.values.Unmarshal(dec); err != nil { + return err + } + // consume ']' + _, err := dec.Token() + return err + case nil: + b.AppendNull() + for i := int32(0); i < b.n; i++ { + b.values.AppendNull() + } + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Struct: arrow.FixedSizeListOf(b.n, b.etype).String(), + } + } + + return nil +} + +func (b *FixedSizeListBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *FixedSizeListBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("fixed size list builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*FixedSizeList)(nil) + _ Builder = (*FixedSizeListBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go new file mode 100644 index 00000000..51e0b974 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "encoding/base64" + "fmt" + "strings" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/goccy/go-json" +) + +// A type which represents an immutable sequence of fixed-length binary strings. +type FixedSizeBinary struct { + array + + valueBytes []byte + bytewidth int32 +} + +// NewFixedSizeBinaryData constructs a new fixed-size binary array from data. 
+func NewFixedSizeBinaryData(data arrow.ArrayData) *FixedSizeBinary { + a := &FixedSizeBinary{bytewidth: int32(data.DataType().(arrow.FixedWidthDataType).BitWidth() / 8)} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Value returns the fixed-size slice at index i. This value should not be mutated. +func (a *FixedSizeBinary) Value(i int) []byte { + i += a.array.data.offset + var ( + bw = int(a.bytewidth) + beg = i * bw + end = (i + 1) * bw + ) + return a.valueBytes[beg:end] +} +func (a *FixedSizeBinary) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} + +func (a *FixedSizeBinary) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%q", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *FixedSizeBinary) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.valueBytes = vals.Bytes() + } + +} + +func (a *FixedSizeBinary) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.Value(i) +} + +func (a *FixedSizeBinary) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = a.Value(i) + } else { + vals[i] = nil + } + } + return json.Marshal(vals) +} + +func arrayEqualFixedSizeBinary(left, right *FixedSizeBinary) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !bytes.Equal(left.Value(i), right.Value(i)) { + return false + } + } + return true +} + +var ( + _ arrow.Array = (*FixedSizeBinary)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go new file mode 100644 index 00000000..02f355ee --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go @@ -0,0 +1,249 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// A FixedSizeBinaryBuilder is used to build a FixedSizeBinary array using the Append methods. 
+type FixedSizeBinaryBuilder struct { + builder + + dtype *arrow.FixedSizeBinaryType + values *byteBufferBuilder +} + +func NewFixedSizeBinaryBuilder(mem memory.Allocator, dtype *arrow.FixedSizeBinaryType) *FixedSizeBinaryBuilder { + b := &FixedSizeBinaryBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + values: newByteBufferBuilder(mem), + } + return b +} + +func (b *FixedSizeBinaryBuilder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (b *FixedSizeBinaryBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.values != nil { + b.values.Release() + b.values = nil + } + } +} + +func (b *FixedSizeBinaryBuilder) Append(v []byte) { + if len(v) != b.dtype.ByteWidth { + // TODO(alexandre): should we return an error instead? + panic("len(v) != b.dtype.ByteWidth") + } + + b.Reserve(1) + b.values.Append(v) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *FixedSizeBinaryBuilder) AppendNull() { + b.Reserve(1) + b.values.Advance(b.dtype.ByteWidth) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *FixedSizeBinaryBuilder) AppendEmptyValue() { + b.Reserve(1) + b.values.Advance(b.dtype.ByteWidth) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *FixedSizeBinaryBuilder) UnsafeAppend(v []byte) { + b.values.unsafeAppend(v) + b.UnsafeAppendBoolToBitmap(true) +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *FixedSizeBinaryBuilder) AppendValues(v [][]byte, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + for _, vv := range v { + switch len(vv) { + case 0: + b.values.Advance(b.dtype.ByteWidth) + case b.dtype.ByteWidth: + b.values.Append(vv) + default: + panic(fmt.Errorf("array: invalid binary length (got=%d, want=%d)", len(vv), b.dtype.ByteWidth)) + } + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *FixedSizeBinaryBuilder) init(capacity int) { + b.builder.init(capacity) + b.values.resize(capacity * b.dtype.ByteWidth) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *FixedSizeBinaryBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *FixedSizeBinaryBuilder) Resize(n int) { + b.builder.resize(n, b.init) +} + +// NewArray creates a FixedSizeBinary array from the memory buffers used by the +// builder and resets the FixedSizeBinaryBuilder so it can be used to build a new array. +func (b *FixedSizeBinaryBuilder) NewArray() arrow.Array { + return b.NewFixedSizeBinaryArray() +} + +// NewFixedSizeBinaryArray creates a FixedSizeBinary array from the memory buffers used by the builder and resets the FixedSizeBinaryBuilder +// so it can be used to build a new array. 
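Unlike the variable-width binary builder, every slice passed to Append here must match the type's ByteWidth exactly (it panics otherwise), and AppendNull/AppendEmptyValue still advance the value buffer so offsets stay implicit. A minimal sketch using only the API shown in this file:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	bb := array.NewFixedSizeBinaryBuilder(memory.DefaultAllocator,
		&arrow.FixedSizeBinaryType{ByteWidth: 4})
	defer bb.Release()

	bb.Append([]byte{0xDE, 0xAD, 0xBE, 0xEF}) // must be exactly 4 bytes
	bb.AppendNull()                           // still advances 4 bytes in the value buffer

	arr := bb.NewFixedSizeBinaryArray()
	defer arr.Release()
	fmt.Println(arr.Len(), arr.NullN()) // 2 1
}
```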
+func (b *FixedSizeBinaryBuilder) NewFixedSizeBinaryArray() (a *FixedSizeBinary) { + data := b.newData() + a = NewFixedSizeBinaryData(data) + data.Release() + return +} + +func (b *FixedSizeBinaryBuilder) newData() (data *Data) { + values := b.values.Finish() + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, values}, nil, b.nulls, 0) + + if values != nil { + values.Release() + } + + b.builder.reset() + + return +} + +func (b *FixedSizeBinaryBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b.AppendNull() + return err + } + b.Append(data) + return nil +} + +func (b *FixedSizeBinaryBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + var val []byte + switch v := t.(type) { + case string: + data, err := base64.RawStdEncoding.DecodeString(v) + if err != nil { + return err + } + val = data + case []byte: + val = v + case nil: + b.AppendNull() + return nil + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf([]byte{}), + Offset: dec.InputOffset(), + Struct: fmt.Sprintf("FixedSizeBinary[%d]", b.dtype.ByteWidth), + } + } + + if len(val) != b.dtype.ByteWidth { + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(val), + Type: reflect.TypeOf([]byte{}), + Offset: dec.InputOffset(), + Struct: fmt.Sprintf("FixedSizeBinary[%d]", b.dtype.ByteWidth), + } + } + b.Append(val) + return nil +} + +func (b *FixedSizeBinaryBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *FixedSizeBinaryBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("fixed size binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ Builder = (*FixedSizeBinaryBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go new file mode 100644 index 00000000..252a81fc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/float16" + "github.com/goccy/go-json" +) + +// A type which represents an immutable sequence of Float16 values. 
+type Float16 struct { + array + values []float16.Num +} + +func NewFloat16Data(data arrow.ArrayData) *Float16 { + a := &Float16{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *Float16) Value(i int) float16.Num { return a.values[i] } +func (a *Float16) ValueStr(i int) string { return a.Value(i).String()} + +func (a *Float16) Values() []float16.Num { return a.values } + +func (a *Float16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", a.values[i].Float32()) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Float16) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.values[i].Float32() + } + return nil +} + +func (a *Float16) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i, v := range a.values { + if a.IsValid(i) { + vals[i] = v.Float32() + } else { + vals[i] = nil + } + } + return json.Marshal(vals) +} + +func arrayEqualFloat16(left, right *Float16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +var ( + _ arrow.Array = (*Float16)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go new file mode 100644 index 00000000..77efb4d4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go @@ -0,0 +1,251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/float16" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +type Float16Builder struct { + builder + + data *memory.Buffer + rawData []float16.Num +} + +func NewFloat16Builder(mem memory.Allocator) *Float16Builder { + return &Float16Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Float16Builder) Type() arrow.DataType { return arrow.FixedWidthTypes.Float16 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Float16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float16Builder) Append(v float16.Num) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float16Builder) UnsafeAppend(v float16.Num) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float16Builder) AppendEmptyValue() { + b.Reserve(1) + b.UnsafeAppend(float16.Num{}) +} + +func (b *Float16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float16Builder) AppendValues(v []float16.Num, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + if len(v) > 0 { + arrow.Float16Traits.Copy(b.rawData[b.length:], v) + } + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Float16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Float16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float16Traits.BytesRequired(n)) + b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder +// so it can be used to build a new array. +func (b *Float16Builder) NewArray() arrow.Array { + return b.NewFloat16Array() +} + +// NewFloat16Array creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder +// so it can be used to build a new array. 
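Values enter and leave this builder as float16.Num; conversions from float32/float64 (including the JSON paths noted above) truncate to half precision. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/float16"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	fb := array.NewFloat16Builder(memory.DefaultAllocator)
	defer fb.Release()

	fb.Append(float16.New(1.5)) // exactly representable
	fb.AppendNull()
	fb.Append(float16.New(3.14159)) // stored at half precision

	arr := fb.NewFloat16Array()
	defer arr.Release()
	fmt.Println(arr) // approximately [1.5 (null) 3.140625]
}
```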
+func (b *Float16Builder) NewFloat16Array() (a *Float16) { + data := b.newData() + a = NewFloat16Data(data) + data.Release() + return +} + +func (b *Float16Builder) newData() (data *Data) { + bytesRequired := arrow.Float16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.Float16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Float16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 32) + if err != nil { + b.AppendNull() + return err + } + b.Append(float16.New(float32(v))) + return nil +} + +func (b *Float16Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case float64: + b.Append(float16.New(float32(v))) + case string: + f, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + // this will currently silently truncate if it is too large + b.Append(float16.New(float32(f))) + case json.Number: + f, err := v.Float64() + if err != nil { + return err + } + b.Append(float16.New(float32(f))) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(float16.Num{}), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *Float16Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +// UnmarshalJSON will add values to this builder from unmarshalling the +// array of values. Currently values that are larger than a float16 will +// be silently truncated. +func (b *Float16Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("float16 builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go new file mode 100644 index 00000000..cd8eaae0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go @@ -0,0 +1,908 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package array + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +func NewIntervalData(data arrow.ArrayData) arrow.Array { + switch data.DataType().(type) { + case *arrow.MonthIntervalType: + return NewMonthIntervalData(data.(*Data)) + case *arrow.DayTimeIntervalType: + return NewDayTimeIntervalData(data.(*Data)) + case *arrow.MonthDayNanoIntervalType: + return NewMonthDayNanoIntervalData(data.(*Data)) + default: + panic(fmt.Errorf("arrow/array: unknown interval data type %T", data.DataType())) + } +} + +// A type which represents an immutable sequence of arrow.MonthInterval values. +type MonthInterval struct { + array + values []arrow.MonthInterval +} + +func NewMonthIntervalData(data arrow.ArrayData) *MonthInterval { + a := &MonthInterval{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *MonthInterval) Value(i int) arrow.MonthInterval { return a.values[i] } +func (a *MonthInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return fmt.Sprintf("%v", a.Value(i)) +} +func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.values } + +func (a *MonthInterval) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *MonthInterval) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.MonthIntervalTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *MonthInterval) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.values[i] + } + return nil +} + +// MarshalJSON will create a json array out of a MonthInterval array, +// each value will be an object of the form {"months": #} where +// # is the numeric value of that index +func (a *MonthInterval) MarshalJSON() ([]byte, error) { + if a.NullN() == 0 { + return json.Marshal(a.values) + } + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = a.values[i] + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualMonthInterval(left, right *MonthInterval) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type MonthIntervalBuilder struct { + builder + + data *memory.Buffer + rawData []arrow.MonthInterval +} + +func NewMonthIntervalBuilder(mem memory.Allocator) *MonthIntervalBuilder { + return &MonthIntervalBuilder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *MonthIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.MonthInterval } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *MonthIntervalBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *MonthIntervalBuilder) Append(v arrow.MonthInterval) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *MonthIntervalBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *MonthIntervalBuilder) AppendEmptyValue() { + b.Append(arrow.MonthInterval(0)) +} + +func (b *MonthIntervalBuilder) UnsafeAppend(v arrow.MonthInterval) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *MonthIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *MonthIntervalBuilder) AppendValues(v []arrow.MonthInterval, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.MonthIntervalTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *MonthIntervalBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.MonthIntervalTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *MonthIntervalBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *MonthIntervalBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.MonthIntervalTraits.BytesRequired(n)) + b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder +// so it can be used to build a new array. +func (b *MonthIntervalBuilder) NewArray() arrow.Array { + return b.NewMonthIntervalArray() +} + +// NewMonthIntervalArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder +// so it can be used to build a new array. 
+func (b *MonthIntervalBuilder) NewMonthIntervalArray() (a *MonthInterval) { + data := b.newData() + a = NewMonthIntervalData(data) + data.Release() + return +} + +func (b *MonthIntervalBuilder) newData() (data *Data) { + bytesRequired := arrow.MonthIntervalTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.MonthInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *MonthIntervalBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.MonthInterval(v)) + return nil +} + +func (b *MonthIntervalBuilder) UnmarshalOne(dec *json.Decoder) error { + var v *arrow.MonthInterval + if err := dec.Decode(&v); err != nil { + return err + } + + if v == nil { + b.AppendNull() + } else { + b.Append(*v) + } + return nil +} + +func (b *MonthIntervalBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +// UnmarshalJSON will add the unmarshalled values of an array to the builder, +// values are expected to be strings of the form "#months" where # is the int32 +// value that will be added to the builder. +func (b *MonthIntervalBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("month interval builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +// A type which represents an immutable sequence of arrow.DayTimeInterval values. +type DayTimeInterval struct { + array + values []arrow.DayTimeInterval +} + +func NewDayTimeIntervalData(data arrow.ArrayData) *DayTimeInterval { + a := &DayTimeInterval{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *DayTimeInterval) Value(i int) arrow.DayTimeInterval { return a.values[i] } +func (a *DayTimeInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return fmt.Sprintf("%q", a.Value(i)) +} +func (a *DayTimeInterval) DayTimeIntervalValues() []arrow.DayTimeInterval { return a.values } + +func (a *DayTimeInterval) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *DayTimeInterval) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.DayTimeIntervalTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *DayTimeInterval) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.values[i] + } + return nil +} + +// MarshalJSON will marshal this array to JSON as an array of objects, +// consisting of the form {"days": #, "milliseconds": #} for each element. 
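The three interval flavors in this file share the same builder shape; only the element type differs. A usage sketch covering the MonthIntervalBuilder above and the DayTimeIntervalBuilder defined further below:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	mb := array.NewMonthIntervalBuilder(memory.DefaultAllocator)
	defer mb.Release()
	mb.Append(arrow.MonthInterval(14)) // 1 year, 2 months
	mb.AppendNull()

	months := mb.NewMonthIntervalArray()
	defer months.Release()
	fmt.Println(months) // [14 (null)]

	db := array.NewDayTimeIntervalBuilder(memory.DefaultAllocator)
	defer db.Release()
	db.Append(arrow.DayTimeInterval{Days: 1, Milliseconds: 500})

	days := db.NewDayTimeIntervalArray()
	defer days.Release()
	fmt.Println(days.Len()) // 1
}
```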
+func (a *DayTimeInterval) MarshalJSON() ([]byte, error) { + if a.NullN() == 0 { + return json.Marshal(a.values) + } + vals := make([]interface{}, a.Len()) + for i, v := range a.values { + if a.IsValid(i) { + vals[i] = v + } else { + vals[i] = nil + } + } + return json.Marshal(vals) +} + +func arrayEqualDayTimeInterval(left, right *DayTimeInterval) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type DayTimeIntervalBuilder struct { + builder + + data *memory.Buffer + rawData []arrow.DayTimeInterval +} + +func NewDayTimeIntervalBuilder(mem memory.Allocator) *DayTimeIntervalBuilder { + return &DayTimeIntervalBuilder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *DayTimeIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.DayTimeInterval } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *DayTimeIntervalBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *DayTimeIntervalBuilder) Append(v arrow.DayTimeInterval) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *DayTimeIntervalBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *DayTimeIntervalBuilder) AppendEmptyValue() { + b.Append(arrow.DayTimeInterval{}) +} + +func (b *DayTimeIntervalBuilder) UnsafeAppend(v arrow.DayTimeInterval) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *DayTimeIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *DayTimeIntervalBuilder) AppendValues(v []arrow.DayTimeInterval, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.DayTimeIntervalTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *DayTimeIntervalBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.DayTimeIntervalTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *DayTimeIntervalBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. 
+func (b *DayTimeIntervalBuilder) Resize(n int) {
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.DayTimeIntervalTraits.BytesRequired(n))
+		b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder
+// so it can be used to build a new array.
+func (b *DayTimeIntervalBuilder) NewArray() arrow.Array {
+	return b.NewDayTimeIntervalArray()
+}
+
+// NewDayTimeIntervalArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder
+// so it can be used to build a new array.
+func (b *DayTimeIntervalBuilder) NewDayTimeIntervalArray() (a *DayTimeInterval) {
+	data := b.newData()
+	a = NewDayTimeIntervalData(data)
+	data.Release()
+	return
+}
+
+func (b *DayTimeIntervalBuilder) newData() (data *Data) {
+	bytesRequired := arrow.DayTimeIntervalTraits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.FixedWidthTypes.DayTimeInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+func (b *DayTimeIntervalBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	var v arrow.DayTimeInterval
+	if err := json.Unmarshal([]byte(s), &v); err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(v)
+	return nil
+}
+
+func (b *DayTimeIntervalBuilder) UnmarshalOne(dec *json.Decoder) error {
+	var v *arrow.DayTimeInterval
+	if err := dec.Decode(&v); err != nil {
+		return err
+	}
+
+	if v == nil {
+		b.AppendNull()
+	} else {
+		b.Append(*v)
+	}
+	return nil
+}
+
+func (b *DayTimeIntervalBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON will add the values unmarshalled from an array to the builder,
+// with the values expected to be objects of the form {"days": #, "milliseconds": #}
+func (b *DayTimeIntervalBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("day_time interval builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// A type which represents an immutable sequence of arrow.MonthDayNanoInterval values.
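+//
+// A short construction sketch (illustrative only, not upstream code; the builder
+// is defined below in this file):
+//
+//	bldr := NewMonthDayNanoIntervalBuilder(memory.DefaultAllocator)
+//	defer bldr.Release()
+//	bldr.Append(arrow.MonthDayNanoInterval{Months: 1, Days: 15, Nanoseconds: 30_000_000_000})
+//	arr := bldr.NewMonthDayNanoIntervalArray()
+//	defer arr.Release()
+//	_ = arr.Value(0) // arrow.MonthDayNanoInterval{Months: 1, Days: 15, ...}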
+type MonthDayNanoInterval struct { + array + values []arrow.MonthDayNanoInterval +} + +func NewMonthDayNanoIntervalData(data arrow.ArrayData) *MonthDayNanoInterval { + a := &MonthDayNanoInterval{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *MonthDayNanoInterval) Value(i int) arrow.MonthDayNanoInterval { return a.values[i] } +func (a *MonthDayNanoInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return fmt.Sprintf("%q", a.Value(i)) +} + +func (a *MonthDayNanoInterval) MonthDayNanoIntervalValues() []arrow.MonthDayNanoInterval { + return a.values +} + +func (a *MonthDayNanoInterval) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *MonthDayNanoInterval) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.MonthDayNanoIntervalTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *MonthDayNanoInterval) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.values[i] + } + return nil +} + +// MarshalJSON will marshal this array to a JSON array with elements +// marshalled to the form {"months": #, "days": #, "nanoseconds": #} +func (a *MonthDayNanoInterval) MarshalJSON() ([]byte, error) { + if a.NullN() == 0 { + return json.Marshal(a.values) + } + vals := make([]interface{}, a.Len()) + for i, v := range a.values { + if a.IsValid(i) { + vals[i] = v + } else { + vals[i] = nil + } + } + return json.Marshal(vals) +} + +func arrayEqualMonthDayNanoInterval(left, right *MonthDayNanoInterval) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type MonthDayNanoIntervalBuilder struct { + builder + + data *memory.Buffer + rawData []arrow.MonthDayNanoInterval +} + +func NewMonthDayNanoIntervalBuilder(mem memory.Allocator) *MonthDayNanoIntervalBuilder { + return &MonthDayNanoIntervalBuilder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *MonthDayNanoIntervalBuilder) Type() arrow.DataType { + return arrow.FixedWidthTypes.MonthDayNanoInterval +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *MonthDayNanoIntervalBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *MonthDayNanoIntervalBuilder) Append(v arrow.MonthDayNanoInterval) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *MonthDayNanoIntervalBuilder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *MonthDayNanoIntervalBuilder) AppendEmptyValue() {
+	b.Append(arrow.MonthDayNanoInterval{})
+}
+
+func (b *MonthDayNanoIntervalBuilder) UnsafeAppend(v arrow.MonthDayNanoInterval) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+func (b *MonthDayNanoIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *MonthDayNanoIntervalBuilder) AppendValues(v []arrow.MonthDayNanoInterval, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.MonthDayNanoIntervalTraits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *MonthDayNanoIntervalBuilder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.MonthDayNanoIntervalTraits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.MonthDayNanoIntervalTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *MonthDayNanoIntervalBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *MonthDayNanoIntervalBuilder) Resize(n int) {
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.MonthDayNanoIntervalTraits.BytesRequired(n))
+		b.rawData = arrow.MonthDayNanoIntervalTraits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a MonthDayNanoInterval array from the memory buffers used by the builder and resets the MonthDayNanoIntervalBuilder
+// so it can be used to build a new array.
+func (b *MonthDayNanoIntervalBuilder) NewArray() arrow.Array {
+	return b.NewMonthDayNanoIntervalArray()
+}
+
+// NewMonthDayNanoIntervalArray creates a MonthDayNanoInterval array from the memory buffers used by the builder and resets the MonthDayNanoIntervalBuilder
+// so it can be used to build a new array.
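+//
+// A reuse sketch (illustrative only, not upstream code): after NewArray or
+// NewMonthDayNanoIntervalArray the builder is empty and can be appended to again:
+//
+//	bldr := NewMonthDayNanoIntervalBuilder(memory.DefaultAllocator)
+//	defer bldr.Release()
+//	bldr.Append(arrow.MonthDayNanoInterval{Months: 1, Days: 2, Nanoseconds: 3})
+//	first := bldr.NewMonthDayNanoIntervalArray() // first.Len() == 1, builder reset
+//	defer first.Release()
+//	bldr.Append(arrow.MonthDayNanoInterval{Months: 4, Days: 5, Nanoseconds: 6})
+//	second := bldr.NewMonthDayNanoIntervalArray() // second.Len() == 1
+//	defer second.Release()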
+func (b *MonthDayNanoIntervalBuilder) NewMonthDayNanoIntervalArray() (a *MonthDayNanoInterval) { + data := b.newData() + a = NewMonthDayNanoIntervalData(data) + data.Release() + return +} + +func (b *MonthDayNanoIntervalBuilder) newData() (data *Data) { + bytesRequired := arrow.MonthDayNanoIntervalTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.FixedWidthTypes.MonthDayNanoInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *MonthDayNanoIntervalBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + var v arrow.MonthDayNanoInterval + if err := json.Unmarshal([]byte(s), &v); err != nil { + return err + } + b.Append(v) + return nil +} + +func (b *MonthDayNanoIntervalBuilder) UnmarshalOne(dec *json.Decoder) error { + var v *arrow.MonthDayNanoInterval + if err := dec.Decode(&v); err != nil { + return err + } + + if v == nil { + b.AppendNull() + } else { + b.Append(*v) + } + return nil +} + +func (b *MonthDayNanoIntervalBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +// UnmarshalJSON unmarshals a JSON array of objects and adds them to this builder, +// each element of the array is expected to be an object of the form +// {"months": #, "days": #, "nanoseconds": #} +func (b *MonthDayNanoIntervalBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("month_day_nano interval builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*MonthInterval)(nil) + _ arrow.Array = (*DayTimeInterval)(nil) + _ arrow.Array = (*MonthDayNanoInterval)(nil) + + _ Builder = (*MonthIntervalBuilder)(nil) + _ Builder = (*DayTimeIntervalBuilder)(nil) + _ Builder = (*MonthDayNanoIntervalBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go new file mode 100644 index 00000000..100fdd91 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go @@ -0,0 +1,205 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package array
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/goccy/go-json"
+)
+
+type Option func(config)
+type config interface{}
+
+// WithChunk sets the chunk size for reading in json records. The default is to
+// read in one row per record batch as a single object. If chunk size is set to
+// a negative value, then the entire file is read as a single record batch.
+// Otherwise a record batch is read in with chunk size rows per record batch until
+// it reaches EOF.
+func WithChunk(n int) Option {
+	return func(cfg config) {
+		switch cfg := cfg.(type) {
+		case *JSONReader:
+			cfg.chunk = n
+		default:
+			panic(fmt.Errorf("arrow/json: unknown config type %T", cfg))
+		}
+	}
+}
+
+// WithAllocator specifies the allocator to use for creating the record batches,
+// if it is not called, then memory.DefaultAllocator will be used.
+func WithAllocator(mem memory.Allocator) Option {
+	return func(cfg config) {
+		switch cfg := cfg.(type) {
+		case *JSONReader:
+			cfg.mem = mem
+		default:
+			panic(fmt.Errorf("arrow/json: unknown config type %T", cfg))
+		}
+	}
+}
+
+// JSONReader is a json reader that meets the RecordReader interface definition.
+//
+// To read in an array of objects as a record, you can use RecordFromJSON
+// which is equivalent to reading the json as a struct array whose fields are
+// the columns of the record. This primarily exists to fit the RecordReader
+// interface as a matching reader for the csv reader.
+type JSONReader struct {
+	r      *json.Decoder
+	schema *arrow.Schema
+
+	bldr *RecordBuilder
+
+	refs int64
+	cur  arrow.Record
+	err  error
+
+	chunk int
+	done  bool
+
+	mem  memory.Allocator
+	next func() bool
+}
+
+// NewJSONReader returns a json RecordReader which expects to find one json object
+// per row of dataset. Using WithChunk can control how many rows are processed
+// per record, which is how many objects become a single record from the file.
+//
+// If it is desired to write out an array of rows, then simply use RecordToStructArray
+// and json.Marshal the struct array for the same effect.
+func NewJSONReader(r io.Reader, schema *arrow.Schema, opts ...Option) *JSONReader {
+	rr := &JSONReader{
+		r:      json.NewDecoder(r),
+		schema: schema,
+		refs:   1,
+		chunk:  1,
+	}
+	for _, o := range opts {
+		o(rr)
+	}
+
+	if rr.mem == nil {
+		rr.mem = memory.DefaultAllocator
+	}
+
+	rr.bldr = NewRecordBuilder(rr.mem, schema)
+	switch {
+	case rr.chunk < 0:
+		rr.next = rr.nextall
+	case rr.chunk > 1:
+		rr.next = rr.nextn
+	default:
+		rr.next = rr.next1
+	}
+	return rr
+}
+
+// Err returns the last encountered error
+func (r *JSONReader) Err() error { return r.err }
+
+func (r *JSONReader) Schema() *arrow.Schema { return r.schema }
+
+// Record returns the last read in record. The returned record is only valid
+// until the next call to Next unless Retain is called on the record itself.
+func (r *JSONReader) Record() arrow.Record { return r.cur }
+
+func (r *JSONReader) Retain() {
+	atomic.AddInt64(&r.refs, 1)
+}
+
+func (r *JSONReader) Release() {
+	debug.Assert(atomic.LoadInt64(&r.refs) > 0, "too many releases")
+
+	if atomic.AddInt64(&r.refs, -1) == 0 {
+		if r.cur != nil {
+			r.cur.Release()
+		}
+		r.bldr.Release()
+		r.r = nil
+	}
+}
+
+// Next returns true if it read in a record, which will be available via Record
+// and false if there is either an error or the end of the reader.
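+//
+// A typical read loop (a sketch; the schema and input here are assumptions for
+// illustration and not part of the upstream source):
+//
+//	schema := arrow.NewSchema([]arrow.Field{
+//		{Name: "id", Type: arrow.PrimitiveTypes.Int64},
+//		{Name: "name", Type: arrow.BinaryTypes.String},
+//	}, nil)
+//	rdr := NewJSONReader(strings.NewReader(`{"id": 1, "name": "a"}
+//	{"id": 2, "name": "b"}`), schema, WithChunk(-1))
+//	defer rdr.Release()
+//	for rdr.Next() {
+//		rec := rdr.Record() // valid only until the next call to Next
+//		_ = rec
+//	}
+//	if err := rdr.Err(); err != nil {
+//		// handle a malformed row or I/O failure
+//	}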
+func (r *JSONReader) Next() bool { + if r.cur != nil { + r.cur.Release() + r.cur = nil + } + + if r.err != nil || r.done { + return false + } + + return r.next() +} + +func (r *JSONReader) readNext() bool { + r.err = r.r.Decode(r.bldr) + if r.err != nil { + r.done = true + if errors.Is(r.err, io.EOF) { + r.err = nil + } + return false + } + return true +} + +func (r *JSONReader) nextall() bool { + for r.readNext() { + } + + r.cur = r.bldr.NewRecord() + return r.cur.NumRows() > 0 +} + +func (r *JSONReader) next1() bool { + if !r.readNext() { + return false + } + + r.cur = r.bldr.NewRecord() + return true +} + +func (r *JSONReader) nextn() bool { + var n = 0 + + for i := 0; i < r.chunk && !r.done; i, n = i+1, n+1 { + if !r.readNext() { + break + } + } + + if n > 0 { + r.cur = r.bldr.NewRecord() + } + return n > 0 +} + +var ( + _ RecordReader = (*JSONReader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go new file mode 100644 index 00000000..374ed281 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go @@ -0,0 +1,607 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +type ListLike interface { + arrow.Array + ListValues() arrow.Array + ValueOffsets(i int) (start, end int64) +} + +// List represents an immutable sequence of array values. +type List struct { + array + values arrow.Array + offsets []int32 +} + +// NewListData returns a new List array value, from data. 
+func NewListData(data arrow.ArrayData) *List { + a := &List{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *List) ListValues() arrow.Array { return a.values } + +func (a *List) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} + +func (a *List) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if !a.IsValid(i) { + o.WriteString("(null)") + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *List) newListValue(i int) arrow.Array { + j := i + a.array.data.offset + beg := int64(a.offsets[j]) + end := int64(a.offsets[j+1]) + return NewSlice(a.values, beg, end) +} + +func (a *List) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.offsets = arrow.Int32Traits.CastFromBytes(vals.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func (a *List) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + return json.RawMessage(v) +} + +func (a *List) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualList(left, right *List) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *List) Len() int { return a.array.Len() } + +func (a *List) Offsets() []int32 { return a.offsets } + +func (a *List) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *List) Release() { + a.array.Release() + a.values.Release() +} + +func (a *List) ValueOffsets(i int) (start, end int64) { + debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") + start, end = int64(a.offsets[i+a.data.offset]), int64(a.offsets[i+a.data.offset+1]) + return +} + +// LargeList represents an immutable sequence of array values. +type LargeList struct { + array + values arrow.Array + offsets []int64 +} + +// NewLargeListData returns a new LargeList array value, from data. 
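+//
+// Offset-based access works the same way for List and LargeList. A sketch,
+// assuming a list<int64> column with at least three rows (illustrative only):
+//
+//	start, end := arr.ValueOffsets(2) // bounds of row 2 within the child array
+//	vals := arr.ListValues().(*Int64) // child values array shared by all rows
+//	for i := start; i < end; i++ {
+//		_ = vals.Value(int(i))
+//	}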
+func NewLargeListData(data arrow.ArrayData) *LargeList { + a := new(LargeList) + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *LargeList) ListValues() arrow.Array { return a.values } + +func (a *LargeList) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} +func (a *LargeList) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if !a.IsValid(i) { + o.WriteString("(null)") + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *LargeList) newListValue(i int) arrow.Array { + j := i + a.array.data.offset + beg := int64(a.offsets[j]) + end := int64(a.offsets[j+1]) + return NewSlice(a.values, beg, end) +} + +func (a *LargeList) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.offsets = arrow.Int64Traits.CastFromBytes(vals.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func (a *LargeList) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + return json.RawMessage(v) +} + +func (a *LargeList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualLargeList(left, right *LargeList) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *LargeList) Len() int { return a.array.Len() } + +func (a *LargeList) Offsets() []int64 { return a.offsets } + +func (a *LargeList) ValueOffsets(i int) (start, end int64) { + debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") + start, end = a.offsets[i], a.offsets[i+1] + return +} + +func (a *LargeList) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *LargeList) Release() { + a.array.Release() + a.values.Release() +} + +type baseListBuilder struct { + builder + + values Builder // value builder for the list's elements. + offsets Builder + + // actual list type + dt arrow.DataType + appendOffsetVal func(int) +} + +type ListLikeBuilder interface { + Builder + ValueBuilder() Builder + Append(bool) +} + +type ListBuilder struct { + baseListBuilder +} + +type LargeListBuilder struct { + baseListBuilder +} + +// NewListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. 
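+//
+// A construction sketch (illustrative only, assuming an int64 element type):
+//
+//	lb := NewListBuilder(memory.DefaultAllocator, arrow.PrimitiveTypes.Int64)
+//	defer lb.Release()
+//	vb := lb.ValueBuilder().(*Int64Builder)
+//	lb.Append(true) // start list element 0
+//	vb.AppendValues([]int64{1, 2, 3}, nil)
+//	lb.AppendNull() // list element 1 is null
+//	arr := lb.NewListArray() // [[1 2 3] (null)]
+//	defer arr.Release()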
+func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder { + offsetBldr := NewInt32Builder(mem) + return &ListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + dt: arrow.ListOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + }, + } +} + +// NewListBuilderWithField takes a field to use for the child rather than just +// a datatype to allow for more customization. +func NewListBuilderWithField(mem memory.Allocator, field arrow.Field) *ListBuilder { + offsetBldr := NewInt32Builder(mem) + return &ListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + dt: arrow.ListOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + }, + } +} + +func (b *baseListBuilder) Type() arrow.DataType { + switch dt := b.dt.(type) { + case *arrow.ListType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.ListOfField(f) + case *arrow.LargeListType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.LargeListOfField(f) + } + return nil +} + +// NewLargeListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewLargeListBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListBuilder { + offsetBldr := NewInt64Builder(mem) + return &LargeListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + dt: arrow.LargeListOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + }, + } +} + +// NewLargeListBuilderWithField takes a field rather than just an element type +// to allow for more customization of the final type of the LargeList Array +func NewLargeListBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListBuilder { + offsetBldr := NewInt64Builder(mem) + return &LargeListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + dt: arrow.LargeListOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + }, + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *baseListBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		b.values.Release()
+		b.offsets.Release()
+	}
+}
+
+func (b *baseListBuilder) appendNextOffset() {
+	b.appendOffsetVal(b.values.Len())
+}
+
+func (b *baseListBuilder) Append(v bool) {
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(v)
+	b.appendNextOffset()
+}
+
+func (b *baseListBuilder) AppendNull() {
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(false)
+	b.appendNextOffset()
+}
+
+func (b *baseListBuilder) AppendEmptyValue() {
+	b.Append(true)
+}
+
+func (b *ListBuilder) AppendValues(offsets []int32, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int32Builder).AppendValues(offsets, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *LargeListBuilder) AppendValues(offsets []int64, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int64Builder).AppendValues(offsets, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *baseListBuilder) unsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+func (b *baseListBuilder) init(capacity int) {
+	b.builder.init(capacity)
+	b.offsets.init(capacity + 1)
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *baseListBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.resizeHelper)
+	b.offsets.Reserve(n)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *baseListBuilder) Resize(n int) {
+	b.resizeHelper(n)
+	b.offsets.Resize(n)
+}
+
+func (b *baseListBuilder) resizeHelper(n int) {
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(n, b.builder.init)
+	}
+}
+
+func (b *baseListBuilder) ValueBuilder() Builder {
+	return b.values
+}
+
+// NewArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
+// so it can be used to build a new array.
+func (b *ListBuilder) NewArray() arrow.Array {
+	return b.NewListArray()
+}
+
+// NewArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder
+// so it can be used to build a new array.
+func (b *LargeListBuilder) NewArray() arrow.Array {
+	return b.NewLargeListArray()
+}
+
+// NewListArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
+// so it can be used to build a new array.
+func (b *ListBuilder) NewListArray() (a *List) {
+	data := b.newData()
+	a = NewListData(data)
+	data.Release()
+	return
+}
+
+// NewLargeListArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder
+// so it can be used to build a new array.
+func (b *LargeListBuilder) NewLargeListArray() (a *LargeList) { + data := b.newData() + a = NewLargeListData(data) + data.Release() + return +} + +func (b *baseListBuilder) newData() (data *Data) { + if b.offsets.Len() != b.length+1 { + b.appendNextOffset() + } + values := b.values.NewArray() + defer values.Release() + + var offsets *memory.Buffer + if b.offsets != nil { + arr := b.offsets.NewArray() + defer arr.Release() + offsets = arr.Data().Buffers()[1] + } + + data = NewData( + b.Type(), b.length, + []*memory.Buffer{ + b.nullBitmap, + offsets, + }, + []arrow.ArrayData{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + +func (b *baseListBuilder) AppendValueFromString(s string) error { + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + +func (b *baseListBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + b.Append(true) + if err := b.values.Unmarshal(dec); err != nil { + return err + } + // consume ']' + _, err := dec.Token() + return err + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Struct: b.dt.String(), + } + } + + return nil +} + +func (b *baseListBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *baseListBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("list builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*List)(nil) + _ arrow.Array = (*LargeList)(nil) + _ Builder = (*ListBuilder)(nil) + _ Builder = (*LargeListBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/map.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/map.go new file mode 100644 index 00000000..c28a3f9d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/map.go @@ -0,0 +1,333 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// Map represents an immutable sequence of Key/Value structs. It is a +// logical type that is implemented as a List. 
+type Map struct {
+	*List
+	keys, items arrow.Array
+}
+
+// NewMapData returns a new Map array value, from data.
+func NewMapData(data arrow.ArrayData) *Map {
+	a := &Map{List: &List{}}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// KeysSorted checks the datatype that was used to construct this array and
+// returns the KeysSorted boolean value used to denote if the key array is
+// sorted for each list element.
+//
+// Important note: Nothing is enforced regarding the KeysSorted value; it is
+// solely a metadata field that should be set if keys within each value are sorted.
+// This value is not used at all with regard to comparisons / equality.
+func (a *Map) KeysSorted() bool { return a.DataType().(*arrow.MapType).KeysSorted }
+
+func (a *Map) validateData(data *Data) {
+	if len(data.childData) != 1 || data.childData[0] == nil {
+		panic("arrow/array: expected one child array for map array")
+	}
+
+	if data.childData[0].DataType().ID() != arrow.STRUCT {
+		panic("arrow/array: map array child should be struct type")
+	}
+
+	if data.childData[0].NullN() != 0 {
+		panic("arrow/array: map array child array should have no nulls")
+	}
+
+	if len(data.childData[0].Children()) != 2 {
+		panic("arrow/array: map array child array should have two fields")
+	}
+
+	if data.childData[0].Children()[0].NullN() != 0 {
+		panic("arrow/array: map array keys array should have no nulls")
+	}
+}
+
+func (a *Map) setData(data *Data) {
+	a.validateData(data)
+
+	a.List.setData(data)
+	a.keys = MakeFromData(data.childData[0].Children()[0])
+	a.items = MakeFromData(data.childData[0].Children()[1])
+}
+
+// Keys returns the full Array of Key values, equivalent to grabbing
+// the key field of the child struct.
+func (a *Map) Keys() arrow.Array { return a.keys }
+
+// Items returns the full Array of Item values, equivalent to grabbing
+// the Value field (the second field) of the child struct.
+func (a *Map) Items() arrow.Array { return a.items }
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (a *Map) Retain() {
+	a.List.Retain()
+	a.keys.Retain()
+	a.items.Retain()
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (a *Map) Release() {
+	a.List.Release()
+	a.keys.Release()
+	a.items.Release()
+}
+
+func arrayEqualMap(left, right *Map) bool {
+	// since Map is implemented using a list, we can just use arrayEqualList
+	return arrayEqualList(left.List, right.List)
+}
+
+type MapBuilder struct {
+	listBuilder *ListBuilder
+
+	etype                   *arrow.MapType
+	keytype, itemtype       arrow.DataType
+	keyBuilder, itemBuilder Builder
+	keysSorted              bool
+}
+
+// NewMapBuilder returns a builder, using the provided memory allocator.
+// The created Map builder will create a map array whose keys will be a non-nullable
+// array of type `keytype` and whose mapped items will be a nullable array of `itemtype`.
+//
+// KeysSorted is not enforced at all by the builder; it should only be set to true
+// when building using keys in sorted order for each value. The KeysSorted value will just be
+// used when creating the DataType for the map.
+//
+// Example
+//
+// A simple example of converting a []map[string]int32 to an array.Map
+// by using a MapBuilder:
+//
+//	/* assume maplist == []map[string]int32 */
+//	bldr := array.NewMapBuilder(memory.DefaultAllocator, arrow.BinaryTypes.String, arrow.PrimitiveTypes.Int32, false)
+//	defer bldr.Release()
+//	kb := bldr.KeyBuilder().(*array.StringBuilder)
+//	ib := bldr.ItemBuilder().(*array.Int32Builder)
+//	for _, m := range maplist {
+//		bldr.Append(true)
+//		for k, v := range m {
+//			kb.Append(k)
+//			ib.Append(v)
+//		}
+//	}
+//	maparr := bldr.NewMapArray()
+//	defer maparr.Release()
+func NewMapBuilder(mem memory.Allocator, keytype, itemtype arrow.DataType, keysSorted bool) *MapBuilder {
+	etype := arrow.MapOf(keytype, itemtype)
+	etype.KeysSorted = keysSorted
+	listBldr := NewListBuilder(mem, etype.ValueType())
+	keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0)
+	keyBldr.Retain()
+	itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1)
+	itemBldr.Retain()
+	return &MapBuilder{
+		listBuilder: listBldr,
+		keyBuilder:  keyBldr,
+		itemBuilder: itemBldr,
+		etype:       etype,
+		keytype:     keytype,
+		itemtype:    itemtype,
+		keysSorted:  keysSorted,
+	}
+}
+
+func NewMapBuilderWithType(mem memory.Allocator, dt *arrow.MapType) *MapBuilder {
+	listBldr := NewListBuilder(mem, dt.ValueType())
+	keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0)
+	keyBldr.Retain()
+	itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1)
+	itemBldr.Retain()
+	return &MapBuilder{
+		listBuilder: listBldr,
+		keyBuilder:  keyBldr,
+		itemBuilder: itemBldr,
+		etype:       dt,
+		keytype:     dt.KeyType(),
+		itemtype:    dt.ValueType(),
+		keysSorted:  dt.KeysSorted,
+	}
+}
+
+func (b *MapBuilder) Type() arrow.DataType { return b.etype }
+
+// Retain increases the reference count by 1 for the sub-builders (list, key, item).
+// Retain may be called simultaneously from multiple goroutines.
+func (b *MapBuilder) Retain() {
+	b.listBuilder.Retain()
+	b.keyBuilder.Retain()
+	b.itemBuilder.Retain()
+}
+
+// Release decreases the reference count by 1 for the sub-builders (list, key, item).
+func (b *MapBuilder) Release() {
+	b.listBuilder.Release()
+	b.keyBuilder.Release()
+	b.itemBuilder.Release()
+}
+
+// Len returns the current number of Maps that are in the builder
+func (b *MapBuilder) Len() int { return b.listBuilder.Len() }
+
+// Cap returns the total number of elements that can be stored
+// without allocating additional memory.
+func (b *MapBuilder) Cap() int { return b.listBuilder.Cap() }
+
+// NullN returns the number of null values in the array builder.
+func (b *MapBuilder) NullN() int { return b.listBuilder.NullN() }
+
+// Append adds a new Map element to the array; calling Append(false) is
+// equivalent to calling AppendNull.
+func (b *MapBuilder) Append(v bool) {
+	b.adjustStructBuilderLen()
+	b.listBuilder.Append(v)
+}
+
+// AppendNull adds a null map entry to the array.
+func (b *MapBuilder) AppendNull() {
+	b.Append(false)
+}
+
+func (b *MapBuilder) AppendEmptyValue() {
+	b.Append(true)
+}
+
+// Reserve ensures there is enough space for appending n maps.
+func (b *MapBuilder) Reserve(n int) { b.listBuilder.Reserve(n) }
+
+// Resize adjusts the space allocated by b to n map elements. If n is greater than
+// b.Cap(), additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *MapBuilder) Resize(n int) { b.listBuilder.Resize(n) }
+
+// AppendValues is for bulk appending a group of elements with the provided
+// offsets and validity booleans.
+func (b *MapBuilder) AppendValues(offsets []int32, valid []bool) { + b.adjustStructBuilderLen() + b.listBuilder.AppendValues(offsets, valid) +} + +func (b *MapBuilder) UnsafeAppendBoolToBitmap(v bool) { + b.listBuilder.UnsafeAppendBoolToBitmap(v) +} + +func (b *MapBuilder) init(capacity int) { b.listBuilder.init(capacity) } +func (b *MapBuilder) resize(newBits int, init func(int)) { b.listBuilder.resize(newBits, init) } + +func (b *MapBuilder) adjustStructBuilderLen() { + sb := b.listBuilder.ValueBuilder().(*StructBuilder) + if sb.Len() < b.keyBuilder.Len() { + valids := make([]bool, b.keyBuilder.Len()-sb.Len()) + for i := range valids { + valids[i] = true + } + sb.AppendValues(valids) + } +} + +// NewArray creates a new Map array from the memory buffers used by the builder, and +// resets the builder so it can be used again to build a new Map array. +func (b *MapBuilder) NewArray() arrow.Array { + return b.NewMapArray() +} + +// NewMapArray creates a new Map array from the memory buffers used by the builder, and +// resets the builder so it can be used again to build a new Map array. +func (b *MapBuilder) NewMapArray() (a *Map) { + if !b.etype.ItemField().Nullable && b.ItemBuilder().NullN() > 0 { + panic("arrow/array: item not nullable") + } + + data := b.newData() + defer data.Release() + a = NewMapData(data) + return +} + +func (b *MapBuilder) newData() (data *Data) { + b.adjustStructBuilderLen() + values := b.listBuilder.NewListArray() + defer values.Release() + + data = NewData(b.etype, + values.Len(), values.data.buffers, + values.data.childData, values.NullN(), 0) + return +} + +// KeyBuilder returns a builder that can be used to populate the keys of the maps. +func (b *MapBuilder) KeyBuilder() Builder { return b.keyBuilder } + +// ItemBuilder returns a builder that can be used to populate the values that the +// keys point to. +func (b *MapBuilder) ItemBuilder() Builder { return b.itemBuilder } + +// ValueBuilder can be used instead of separately using the Key/Item builders +// to build the list as a List of Structs rather than building the keys/items +// separately. +func (b *MapBuilder) ValueBuilder() Builder { + return b.listBuilder.ValueBuilder() +} + +func (b *MapBuilder) AppendValueFromString(s string) error { + return arrow.ErrNotImplemented +} + +func (b *MapBuilder) UnmarshalOne(dec *json.Decoder) error { + return b.listBuilder.UnmarshalOne(dec) +} + +func (b *MapBuilder) Unmarshal(dec *json.Decoder) error { + return b.listBuilder.Unmarshal(dec) +} + +func (b *MapBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("map builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Map)(nil) + _ Builder = (*MapBuilder)(nil) + _ ListLikeBuilder = (*MapBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/null.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/null.go new file mode 100644 index 00000000..8a547a5b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/null.go @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// Null represents an immutable, degenerate array with no physical storage. +type Null struct { + array +} + +// NewNull returns a new Null array value of size n. +func NewNull(n int) *Null { + a := &Null{} + a.refCount = 1 + data := NewData( + arrow.Null, n, + []*memory.Buffer{nil}, + nil, + n, + 0, + ) + a.setData(data) + data.Release() + return a +} + +// NewNullData returns a new Null array value, from data. +func NewNullData(data arrow.ArrayData) *Null { + a := &Null{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *Null) ValueStr(i int) string { + return NullValueStr +} + +func (a *Null) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + o.WriteString("(null)") + } + o.WriteString("]") + return o.String() +} + +func (a *Null) setData(data *Data) { + a.array.setData(data) + a.array.nullBitmapBytes = nil + a.array.data.nulls = a.array.data.length +} + +func (a *Null) GetOneForMarshal(i int) interface{} { + return nil +} + +func (a *Null) MarshalJSON() ([]byte, error) { + return json.Marshal(make([]interface{}, a.Len())) +} + +type NullBuilder struct { + builder +} + +// NewNullBuilder returns a builder, using the provided memory allocator. +func NewNullBuilder(mem memory.Allocator) *NullBuilder { + return &NullBuilder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *NullBuilder) Type() arrow.DataType { return arrow.Null } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *NullBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + } +} + +func (b *NullBuilder) AppendNull() { + b.builder.length++ + b.builder.nulls++ +} + +func (b *NullBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + return fmt.Errorf("cannot convert %q to null", s) +} +func (b *NullBuilder) AppendEmptyValue() { b.AppendNull() } + +func (*NullBuilder) Reserve(size int) {} +func (*NullBuilder) Resize(size int) {} + +func (*NullBuilder) init(cap int) {} +func (*NullBuilder) resize(newBits int, init func(int)) {} + +// NewArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder +// so it can be used to build a new array. 
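+//
+// A brief sketch (illustrative only): NewNull(n) is the direct way to obtain an
+// all-null array, while the builder mirrors the other builder APIs:
+//
+//	nb := NewNullBuilder(memory.DefaultAllocator)
+//	defer nb.Release()
+//	nb.AppendNull()
+//	nb.AppendNull()
+//	arr := nb.NewNullArray() // equivalent to NewNull(2)
+//	defer arr.Release()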
+func (b *NullBuilder) NewArray() arrow.Array { + return b.NewNullArray() +} + +// NewNullArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder +// so it can be used to build a new array. +func (b *NullBuilder) NewNullArray() (a *Null) { + data := b.newData() + a = NewNullData(data) + data.Release() + return +} + +func (b *NullBuilder) newData() (data *Data) { + data = NewData( + arrow.Null, b.length, + []*memory.Buffer{nil}, + nil, + b.nulls, + 0, + ) + b.reset() + + return +} + +func (b *NullBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t.(type) { + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(nil), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *NullBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *NullBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("null builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Null)(nil) + _ Builder = (*NullBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go new file mode 100644 index 00000000..72db2d9d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go @@ -0,0 +1,1520 @@ +// Code generated by array/numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strconv" + "strings" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/goccy/go-json" +) + +// A type which represents an immutable sequence of int64 values. +type Int64 struct { + array + values []int64 +} + +// NewInt64Data creates a new Int64. +func NewInt64Data(data arrow.ArrayData) *Int64 { + a := &Int64{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Int64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int64) Value(i int) int64 { return a.values[i] } + +// Values returns the values. +func (a *Int64) Int64Values() []int64 { return a.values } + +// String returns a string representation of the array. 
+func (a *Int64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Int64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + +func (a *Int64) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Int64) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualInt64(left, right *Int64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint64 values. +type Uint64 struct { + array + values []uint64 +} + +// NewUint64Data creates a new Uint64. +func NewUint64Data(data arrow.ArrayData) *Uint64 { + a := &Uint64{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Uint64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint64) Value(i int) uint64 { return a.values[i] } + +// Values returns the values. +func (a *Uint64) Uint64Values() []uint64 { return a.values } + +// String returns a string representation of the array. +func (a *Uint64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Uint64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + +func (a *Uint64) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Uint64) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualUint64(left, right *Uint64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of float64 values. +type Float64 struct { + array + values []float64 +} + +// NewFloat64Data creates a new Float64. 
+func NewFloat64Data(data arrow.ArrayData) *Float64 { + a := &Float64{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Float64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Float64) Value(i int) float64 { return a.values[i] } + +// Values returns the values. +func (a *Float64) Float64Values() []float64 { return a.values } + +// String returns a string representation of the array. +func (a *Float64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Float64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64) +} + +func (a *Float64) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Float64) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualFloat64(left, right *Float64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int32 values. +type Int32 struct { + array + values []int32 +} + +// NewInt32Data creates a new Int32. +func NewInt32Data(data arrow.ArrayData) *Int32 { + a := &Int32{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Int32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int32) Value(i int) int32 { return a.values[i] } + +// Values returns the values. +func (a *Int32) Int32Values() []int32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Int32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Int32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + +func (a *Int32) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Int32) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualInt32(left, right *Int32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint32 values. +type Uint32 struct { + array + values []uint32 +} + +// NewUint32Data creates a new Uint32. +func NewUint32Data(data arrow.ArrayData) *Uint32 { + a := &Uint32{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Uint32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint32) Value(i int) uint32 { return a.values[i] } + +// Values returns the values. +func (a *Uint32) Uint32Values() []uint32 { return a.values } + +// String returns a string representation of the array. +func (a *Uint32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Uint32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + +func (a *Uint32) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Uint32) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualUint32(left, right *Uint32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of float32 values. +type Float32 struct { + array + values []float32 +} + +// NewFloat32Data creates a new Float32. 
+func NewFloat32Data(data arrow.ArrayData) *Float32 { + a := &Float32{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Float32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Float32) Value(i int) float32 { return a.values[i] } + +// Values returns the values. +func (a *Float32) Float32Values() []float32 { return a.values } + +// String returns a string representation of the array. +func (a *Float32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Float32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Float32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Float32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32) +} + +func (a *Float32) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Float32) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualFloat32(left, right *Float32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int16 values. +type Int16 struct { + array + values []int16 +} + +// NewInt16Data creates a new Int16. +func NewInt16Data(data arrow.ArrayData) *Int16 { + a := &Int16{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Int16) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int16) Value(i int) int16 { return a.values[i] } + +// Values returns the values. +func (a *Int16) Int16Values() []int16 { return a.values } + +// String returns a string representation of the array. 
+func (a *Int16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Int16) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + +func (a *Int16) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Int16) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualInt16(left, right *Int16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint16 values. +type Uint16 struct { + array + values []uint16 +} + +// NewUint16Data creates a new Uint16. +func NewUint16Data(data arrow.ArrayData) *Uint16 { + a := &Uint16{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Uint16) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint16) Value(i int) uint16 { return a.values[i] } + +// Values returns the values. +func (a *Uint16) Uint16Values() []uint16 { return a.values } + +// String returns a string representation of the array. +func (a *Uint16) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint16) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint16Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Uint16) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + +func (a *Uint16) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return a.values[i] +} + +func (a *Uint16) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualUint16(left, right *Uint16) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of int8 values. +type Int8 struct { + array + values []int8 +} + +// NewInt8Data creates a new Int8. 
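+// Note that 1-byte element types such as Int8 and Uint8 marshal to JSON via
+// float64 so that encoders do not mistake the values for base64 binary data.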
+func NewInt8Data(data arrow.ArrayData) *Int8 { + a := &Int8{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Int8) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Int8) Value(i int) int8 { return a.values[i] } + +// Values returns the values. +func (a *Int8) Int8Values() []int8 { return a.values } + +// String returns a string representation of the array. +func (a *Int8) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Int8) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Int8Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Int8) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + +func (a *Int8) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return float64(a.values[i]) // prevent uint8 from being seen as binary data +} + +func (a *Int8) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualInt8(left, right *Int8) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of uint8 values. +type Uint8 struct { + array + values []uint8 +} + +// NewUint8Data creates a new Uint8. +func NewUint8Data(data arrow.ArrayData) *Uint8 { + a := &Uint8{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Uint8) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Uint8) Value(i int) uint8 { return a.values[i] } + +// Values returns the values. +func (a *Uint8) Uint8Values() []uint8 { return a.values } + +// String returns a string representation of the array. 
+func (a *Uint8) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Uint8) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Uint8Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Uint8) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + +func (a *Uint8) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + return float64(a.values[i]) // prevent uint8 from being seen as binary data +} + +func (a *Uint8) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } + + return json.Marshal(vals) +} + +func arrayEqualUint8(left, right *Uint8) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Timestamp values. +type Timestamp struct { + array + values []arrow.Timestamp +} + +// NewTimestampData creates a new Timestamp. +func NewTimestampData(data arrow.ArrayData) *Timestamp { + a := &Timestamp{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Timestamp) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] } + +// Values returns the values. +func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values } + +// String returns a string representation of the array. 
+func (a *Timestamp) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Timestamp) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.TimestampTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Timestamp) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999") +} + +func (a *Timestamp) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999") +} + +func (a *Timestamp) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualTimestamp(left, right *Timestamp) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Time32 values. +type Time32 struct { + array + values []arrow.Time32 +} + +// NewTime32Data creates a new Time32. +func NewTime32Data(data arrow.ArrayData) *Time32 { + a := &Time32{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Time32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Time32) Value(i int) arrow.Time32 { return a.values[i] } + +// Values returns the values. +func (a *Time32) Time32Values() []arrow.Time32 { return a.values } + +// String returns a string representation of the array. +func (a *Time32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Time32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Time32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Time32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString(a.DataType().(*arrow.Time32Type).Unit) +} + +func (a *Time32) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime(a.DataType().(*arrow.Time32Type).Unit).Format("15:04:05.999999999") +} + +func (a *Time32) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualTime32(left, right *Time32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Time64 values. 
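+// An arrow.Time64 is a time of day since midnight, stored in microseconds or
+// nanoseconds according to the array's arrow.Time64Type unit.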
+type Time64 struct { + array + values []arrow.Time64 +} + +// NewTime64Data creates a new Time64. +func NewTime64Data(data arrow.ArrayData) *Time64 { + a := &Time64{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Time64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Time64) Value(i int) arrow.Time64 { return a.values[i] } + +// Values returns the values. +func (a *Time64) Time64Values() []arrow.Time64 { return a.values } + +// String returns a string representation of the array. +func (a *Time64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Time64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Time64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Time64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString(a.DataType().(*arrow.Time64Type).Unit) +} + +func (a *Time64) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime(a.DataType().(*arrow.Time64Type).Unit).Format("15:04:05.999999999") +} + +func (a *Time64) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualTime64(left, right *Time64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Date32 values. +type Date32 struct { + array + values []arrow.Date32 +} + +// NewDate32Data creates a new Date32. +func NewDate32Data(data arrow.ArrayData) *Date32 { + a := &Date32{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Date32) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Date32) Value(i int) arrow.Date32 { return a.values[i] } + +// Values returns the values. +func (a *Date32) Date32Values() []arrow.Date32 { return a.values } + +// String returns a string representation of the array. 
+func (a *Date32) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Date32) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Date32Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Date32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString() +} + +func (a *Date32) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime().Format("2006-01-02") +} + +func (a *Date32) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualDate32(left, right *Date32) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Date64 values. +type Date64 struct { + array + values []arrow.Date64 +} + +// NewDate64Data creates a new Date64. +func NewDate64Data(data arrow.ArrayData) *Date64 { + a := &Date64{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Date64) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Date64) Value(i int) arrow.Date64 { return a.values[i] } + +// Values returns the values. +func (a *Date64) Date64Values() []arrow.Date64 { return a.values } + +// String returns a string representation of the array. +func (a *Date64) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Date64) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.Date64Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Date64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString() +} + +func (a *Date64) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime().Format("2006-01-02") +} + +func (a *Date64) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualDate64(left, right *Date64) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A type which represents an immutable sequence of arrow.Duration values. +type Duration struct { + array + values []arrow.Duration +} + +// NewDurationData creates a new Duration. +func NewDurationData(data arrow.ArrayData) *Duration { + a := &Duration{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. 
+func (a *Duration) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Duration) Value(i int) arrow.Duration { return a.values[i] } + +// Values returns the values. +func (a *Duration) DurationValues() []arrow.Duration { return a.values } + +// String returns a string representation of the array. +func (a *Duration) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Duration) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.DurationTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Duration) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit) +} + +func (a *Duration) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit.String()) +} + +func (a *Duration) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualDuration(left, right *Duration) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl new file mode 100644 index 00000000..01418520 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl @@ -0,0 +1,162 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "fmt" + "strings" + "time" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/goccy/go-json" +) + +{{range .In}} + +// A type which represents an immutable sequence of {{or .QualifiedType .Type}} values. +type {{.Name}} struct { + array + values []{{or .QualifiedType .Type}} +} + +// New{{.Name}}Data creates a new {{.Name}}. +func New{{.Name}}Data(data arrow.ArrayData) *{{.Name}} { + a := &{{.Name}}{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. 
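+// Reset re-points the array at data's buffers in place, re-slicing the values
+// by the data's offset and length.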
+func (a *{{.Name}}) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *{{.Name}}) Value(i int) {{or .QualifiedType .Type}} { return a.values[i] } + +// Values returns the values. +func (a *{{.Name}}) {{.Name}}Values() []{{or .QualifiedType .Type}} { return a.values } + +// String returns a string representation of the array. +func (a *{{.Name}}) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *{{.Name}}) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.{{.Name}}Traits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *{{.Name}}) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } +{{if or (eq .Name "Date32") (eq .Name "Date64") -}} + return a.values[i].FormattedString() +{{else if or (eq .Name "Time32") (eq .Name "Time64") -}} + return a.values[i].FormattedString(a.DataType().(*{{.QualifiedType}}Type).Unit) +{{else if or (eq .Name "Timestamp") -}} + return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("2006-01-02 15:04:05.999999999") +{{else if (eq .Name "Duration") -}} + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit) +{{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}} + return strconv.FormatInt(int64(a.Value(i)), 10) +{{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}} + return strconv.FormatUint(uint64(a.Value(i)), 10) +{{else if or (eq .Name "Float32") -}} + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32) +{{else if or (eq .Name "Float64") -}} + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64) +{{else}} + return fmt.Sprintf("%v", a.values[i]) +{{end -}} +} + +func (a *{{.Name}}) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } +{{if or (eq .Name "Date32") (eq .Name "Date64") -}} + return a.values[i].ToTime().Format("2006-01-02") +{{else if or (eq .Name "Time32") (eq .Name "Time64") -}} + return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("15:04:05.999999999") +{{else if or (eq .Name "Timestamp") -}} + return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("2006-01-02 15:04:05.999999999") +{{else if (eq .Name "Duration") -}} + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit.String()) +{{else if (eq .Size "1")}} + return float64(a.values[i]) // prevent uint8 from being seen as binary data +{{else}} + return a.values[i] +{{end -}} +} + +func (a *{{.Name}}) MarshalJSON() ([]byte, error) { +{{if .QualifiedType -}} + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } +{{else -}} + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + } else { + vals[i] = nil + } + } +{{end}} + return json.Marshal(vals) +} + +func arrayEqual{{.Name}}(left, right *{{.Name}}) bool { + for 
i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +{{end}} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go new file mode 100644 index 00000000..f893a633 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go @@ -0,0 +1,3648 @@ +// Code generated by array/numericbuilder.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +type Int64Builder struct { + builder + + data *memory.Buffer + rawData []int64 +} + +func NewInt64Builder(mem memory.Allocator) *Int64Builder { + return &Int64Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Int64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int64 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int64Builder) Append(v int64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int64Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Int64Builder) UnsafeAppend(v int64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. 
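+// For example, appending 1, null, 3:
+//
+//	b.AppendValues([]int64{1, 0, 3}, []bool{true, false, true})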
+func (b *Int64Builder) AppendValues(v []int64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int64Traits.BytesRequired(n)) + b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder +// so it can be used to build a new array. +func (b *Int64Builder) NewArray() arrow.Array { + return b.NewInt64Array() +} + +// NewInt64Array creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder +// so it can be used to build a new array. +func (b *Int64Builder) NewInt64Array() (a *Int64) { + data := b.newData() + a = NewInt64Data(data) + data.Release() + return +} + +func (b *Int64Builder) newData() (data *Data) { + bytesRequired := arrow.Int64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Int64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int64(v)) + return nil +} + +func (b *Int64Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseInt(v, 10, 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(int64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int64(f)) + case float64: + b.Append(int64(v)) + case json.Number: + f, err := strconv.ParseInt(v.String(), 10, 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(int64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int64(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(int64(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Int64Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Int64Builder) 
UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Uint64Builder struct { + builder + + data *memory.Buffer + rawData []uint64 +} + +func NewUint64Builder(mem memory.Allocator) *Uint64Builder { + return &Uint64Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Uint64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint64 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint64Builder) Append(v uint64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint64Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Uint64Builder) UnsafeAppend(v uint64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint64Builder) AppendValues(v []uint64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint64Traits.BytesRequired(n)) + b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder +// so it can be used to build a new array. 
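+// The caller takes ownership of the returned array and must call Release on it.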
+func (b *Uint64Builder) NewArray() arrow.Array { + return b.NewUint64Array() +} + +// NewUint64Array creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder +// so it can be used to build a new array. +func (b *Uint64Builder) NewUint64Array() (a *Uint64) { + data := b.newData() + a = NewUint64Data(data) + data.Release() + return +} + +func (b *Uint64Builder) newData() (data *Data) { + bytesRequired := arrow.Uint64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Uint64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint64(v)) + return nil +} + +func (b *Uint64Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseUint(v, 10, 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(uint64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint64(f)) + case float64: + b.Append(uint64(v)) + case json.Number: + f, err := strconv.ParseUint(v.String(), 10, 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(uint64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint64(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(uint64(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Uint64Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Uint64Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Float64Builder struct { + builder + + data *memory.Buffer + rawData []float64 +} + +func NewFloat64Builder(mem memory.Allocator) *Float64Builder { + return &Float64Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Float64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float64 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
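+// The reference count is updated atomically, so Release is safe to call from
+// multiple goroutines.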
+func (b *Float64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float64Builder) Append(v float64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float64Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Float64Builder) UnsafeAppend(v float64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float64Builder) AppendValues(v []float64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Float64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Float64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Float64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Float64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float64Traits.BytesRequired(n)) + b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder +// so it can be used to build a new array. +func (b *Float64Builder) NewArray() arrow.Array { + return b.NewFloat64Array() +} + +// NewFloat64Array creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder +// so it can be used to build a new array. 
+func (b *Float64Builder) NewFloat64Array() (a *Float64) { + data := b.newData() + a = NewFloat64Data(data) + data.Release() + return +} + +func (b *Float64Builder) newData() (data *Data) { + bytesRequired := arrow.Float64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Float64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Float64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(float64(v)) + return nil +} + +func (b *Float64Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseFloat(v, 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(float64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(float64(f)) + case float64: + b.Append(float64(v)) + case json.Number: + f, err := strconv.ParseFloat(v.String(), 8*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(float64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(float64(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(float64(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Float64Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Float64Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Int32Builder struct { + builder + + data *memory.Buffer + rawData []int32 +} + +func NewInt32Builder(mem memory.Allocator) *Int32Builder { + return &Int32Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Int32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int32 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int32Builder) Append(v int32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int32Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Int32Builder) UnsafeAppend(v int32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Int32Builder) AppendValues(v []int32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int32Traits.BytesRequired(n)) + b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder +// so it can be used to build a new array. +func (b *Int32Builder) NewArray() arrow.Array { + return b.NewInt32Array() +} + +// NewInt32Array creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder +// so it can be used to build a new array. 
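+// A typical lifecycle, assuming mem is a memory.Allocator such as
+// memory.NewGoAllocator():
+//
+//	b := NewInt32Builder(mem)
+//	defer b.Release()
+//	b.Append(7)
+//	b.AppendNull()
+//	arr := b.NewInt32Array() // resets b for building another array
+//	defer arr.Release()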
+func (b *Int32Builder) NewInt32Array() (a *Int32) { + data := b.newData() + a = NewInt32Data(data) + data.Release() + return +} + +func (b *Int32Builder) newData() (data *Data) { + bytesRequired := arrow.Int32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Int32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int32(v)) + return nil +} + +func (b *Int32Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseInt(v, 10, 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(int32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int32(f)) + case float64: + b.Append(int32(v)) + case json.Number: + f, err := strconv.ParseInt(v.String(), 10, 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(int32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int32(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(int32(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Int32Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Int32Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Uint32Builder struct { + builder + + data *memory.Buffer + rawData []uint32 +} + +func NewUint32Builder(mem memory.Allocator) *Uint32Builder { + return &Uint32Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Uint32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint32 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint32Builder) Append(v uint32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint32Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Uint32Builder) UnsafeAppend(v uint32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint32Builder) AppendValues(v []uint32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint32Traits.BytesRequired(n)) + b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder +// so it can be used to build a new array. +func (b *Uint32Builder) NewArray() arrow.Array { + return b.NewUint32Array() +} + +// NewUint32Array creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder +// so it can be used to build a new array. 
+func (b *Uint32Builder) NewUint32Array() (a *Uint32) { + data := b.newData() + a = NewUint32Data(data) + data.Release() + return +} + +func (b *Uint32Builder) newData() (data *Data) { + bytesRequired := arrow.Uint32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Uint32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint32(v)) + return nil +} + +func (b *Uint32Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseUint(v, 10, 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(uint32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint32(f)) + case float64: + b.Append(uint32(v)) + case json.Number: + f, err := strconv.ParseUint(v.String(), 10, 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(uint32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint32(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(uint32(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Uint32Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Uint32Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Float32Builder struct { + builder + + data *memory.Buffer + rawData []float32 +} + +func NewFloat32Builder(mem memory.Allocator) *Float32Builder { + return &Float32Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Float32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float32 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Float32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Float32Builder) Append(v float32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Float32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Float32Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Float32Builder) UnsafeAppend(v float32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Float32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Float32Builder) AppendValues(v []float32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Float32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Float32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Float32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Float32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Float32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Float32Traits.BytesRequired(n)) + b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder +// so it can be used to build a new array. +func (b *Float32Builder) NewArray() arrow.Array { + return b.NewFloat32Array() +} + +// NewFloat32Array creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder +// so it can be used to build a new array. 
+func (b *Float32Builder) NewFloat32Array() (a *Float32) { + data := b.newData() + a = NewFloat32Data(data) + data.Release() + return +} + +func (b *Float32Builder) newData() (data *Data) { + bytesRequired := arrow.Float32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Float32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Float32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(float32(v)) + return nil +} + +func (b *Float32Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseFloat(v, 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(float32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(float32(f)) + case float64: + b.Append(float32(v)) + case json.Number: + f, err := strconv.ParseFloat(v.String(), 4*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(float32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(float32(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(float32(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Float32Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Float32Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Int16Builder struct { + builder + + data *memory.Buffer + rawData []int16 +} + +func NewInt16Builder(mem memory.Allocator) *Int16Builder { + return &Int16Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Int16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int16 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int16Builder) Append(v int16) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int16Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Int16Builder) UnsafeAppend(v int16) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Int16Builder) AppendValues(v []int16, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int16Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int16Traits.BytesRequired(n)) + b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder +// so it can be used to build a new array. +func (b *Int16Builder) NewArray() arrow.Array { + return b.NewInt16Array() +} + +// NewInt16Array creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder +// so it can be used to build a new array. 
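+// A sketch of the valid-slice contract described on AppendValues above
+// (illustrative only): a false entry marks the corresponding value null,
+// and a nil slice marks every value valid.
+//
+//	bldr := array.NewInt16Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	bldr.AppendValues([]int16{10, 20, 30}, []bool{true, false, true})
+//	arr := bldr.NewInt16Array()
+//	defer arr.Release()
+//	_ = arr.IsNull(1) // true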
+func (b *Int16Builder) NewInt16Array() (a *Int16) { + data := b.newData() + a = NewInt16Data(data) + data.Release() + return +} + +func (b *Int16Builder) newData() (data *Data) { + bytesRequired := arrow.Int16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Int16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 2*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int16(v)) + return nil +} + +func (b *Int16Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseInt(v, 10, 2*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(int16(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int16(f)) + case float64: + b.Append(int16(v)) + case json.Number: + f, err := strconv.ParseInt(v.String(), 10, 2*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(int16(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int16(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(int16(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Int16Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Int16Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Uint16Builder struct { + builder + + data *memory.Buffer + rawData []uint16 +} + +func NewUint16Builder(mem memory.Allocator) *Uint16Builder { + return &Uint16Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Uint16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint16 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint16Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint16Builder) Append(v uint16) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint16Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint16Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Uint16Builder) UnsafeAppend(v uint16) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint16Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint16Builder) AppendValues(v []uint16, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint16Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint16Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint16Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint16Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint16Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint16Traits.BytesRequired(n)) + b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder +// so it can be used to build a new array. +func (b *Uint16Builder) NewArray() arrow.Array { + return b.NewUint16Array() +} + +// NewUint16Array creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder +// so it can be used to build a new array. 
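+// The JSON path accepts numbers, numeric strings, and nulls, per the
+// UnmarshalOne cases above. A hedged sketch of feeding a JSON array into
+// the builder and materializing the result:
+//
+//	bldr := array.NewUint16Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	if err := bldr.UnmarshalJSON([]byte(`[1, "2", null]`)); err != nil {
+//		// handle malformed input
+//	}
+//	arr := bldr.NewUint16Array() // [1 2 (null)]
+//	defer arr.Release()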
+func (b *Uint16Builder) NewUint16Array() (a *Uint16) { + data := b.newData() + a = NewUint16Data(data) + data.Release() + return +} + +func (b *Uint16Builder) newData() (data *Data) { + bytesRequired := arrow.Uint16Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Uint16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 2*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint16(v)) + return nil +} + +func (b *Uint16Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseUint(v, 10, 2*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(uint16(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint16(f)) + case float64: + b.Append(uint16(v)) + case json.Number: + f, err := strconv.ParseUint(v.String(), 10, 2*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(uint16(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint16(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(uint16(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Uint16Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Uint16Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Int8Builder struct { + builder + + data *memory.Buffer + rawData []int8 +} + +func NewInt8Builder(mem memory.Allocator) *Int8Builder { + return &Int8Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Int8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int8 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Int8Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Int8Builder) Append(v int8) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Int8Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Int8Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Int8Builder) UnsafeAppend(v int8) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Int8Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Int8Builder) AppendValues(v []int8, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Int8Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Int8Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Int8Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Int8Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Int8Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Int8Traits.BytesRequired(n)) + b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder +// so it can be used to build a new array. +func (b *Int8Builder) NewArray() arrow.Array { + return b.NewInt8Array() +} + +// NewInt8Array creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder +// so it can be used to build a new array. 
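+// AppendValueFromString round-trips the textual form produced by the
+// array's ValueStr, with array.NullValueStr mapping to a null. A short
+// sketch (illustrative only):
+//
+//	bldr := array.NewInt8Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	_ = bldr.AppendValueFromString("-5")
+//	_ = bldr.AppendValueFromString(array.NullValueStr)
+//	arr := bldr.NewInt8Array() // [-5 (null)]
+//	defer arr.Release()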
+func (b *Int8Builder) NewInt8Array() (a *Int8) { + data := b.newData() + a = NewInt8Data(data) + data.Release() + return +} + +func (b *Int8Builder) newData() (data *Data) { + bytesRequired := arrow.Int8Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Int8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Int8Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 1*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int8(v)) + return nil +} + +func (b *Int8Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseInt(v, 10, 1*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(int8(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int8(f)) + case float64: + b.Append(int8(v)) + case json.Number: + f, err := strconv.ParseInt(v.String(), 10, 1*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(int8(0)), + Offset: dec.InputOffset(), + } + } + b.Append(int8(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(int8(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Int8Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Int8Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Uint8Builder struct { + builder + + data *memory.Buffer + rawData []uint8 +} + +func NewUint8Builder(mem memory.Allocator) *Uint8Builder { + return &Uint8Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Uint8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint8 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Uint8Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Uint8Builder) Append(v uint8) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Uint8Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Uint8Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Uint8Builder) UnsafeAppend(v uint8) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Uint8Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Uint8Builder) AppendValues(v []uint8, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Uint8Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Uint8Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Uint8Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Uint8Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Uint8Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Uint8Traits.BytesRequired(n)) + b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder +// so it can be used to build a new array. +func (b *Uint8Builder) NewArray() arrow.Array { + return b.NewUint8Array() +} + +// NewUint8Array creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder +// so it can be used to build a new array. 
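+// The Unsafe* appenders above skip per-call capacity checks, so callers
+// must Reserve space first. A minimal sketch of that fast path:
+//
+//	bldr := array.NewUint8Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	bldr.Reserve(3)
+//	for _, v := range []uint8{1, 2, 3} {
+//		bldr.UnsafeAppend(v)
+//	}
+//	arr := bldr.NewUint8Array()
+//	defer arr.Release()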
+func (b *Uint8Builder) NewUint8Array() (a *Uint8) { + data := b.newData() + a = NewUint8Data(data) + data.Release() + return +} + +func (b *Uint8Builder) newData() (data *Data) { + bytesRequired := arrow.Uint8Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Uint8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Uint8Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 1*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint8(v)) + return nil +} + +func (b *Uint8Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + + case string: + f, err := strconv.ParseUint(v, 10, 1*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(uint8(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint8(f)) + case float64: + b.Append(uint8(v)) + case json.Number: + f, err := strconv.ParseUint(v.String(), 10, 1*8) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(uint8(0)), + Offset: dec.InputOffset(), + } + } + b.Append(uint8(f)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(uint8(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Uint8Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Uint8Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type TimestampBuilder struct { + builder + + dtype *arrow.TimestampType + data *memory.Buffer + rawData []arrow.Timestamp +} + +func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder { + return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *TimestampBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *TimestampBuilder) Append(v arrow.Timestamp) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *TimestampBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *TimestampBuilder) AppendEmptyValue() { + b.Append(0) +} + +func (b *TimestampBuilder) UnsafeAppend(v arrow.Timestamp) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *TimestampBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *TimestampBuilder) AppendValues(v []arrow.Timestamp, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.TimestampTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *TimestampBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.TimestampTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *TimestampBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *TimestampBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.TimestampTraits.BytesRequired(n)) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array. +func (b *TimestampBuilder) NewArray() arrow.Array { + return b.NewTimestampArray() +} + +// NewTimestampArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array. 
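+// TimestampBuilder is parameterized by unit and time zone. An illustrative
+// sketch (the timestamp layout shown is one of several that
+// arrow.TimestampFromString accepts; treat the exact string as an
+// assumption):
+//
+//	dtype := &arrow.TimestampType{Unit: arrow.Millisecond, TimeZone: "UTC"}
+//	bldr := array.NewTimestampBuilder(memory.NewGoAllocator(), dtype)
+//	defer bldr.Release()
+//	_ = bldr.AppendValueFromString("2021-03-01 10:30:00")
+//	arr := bldr.NewTimestampArray()
+//	defer arr.Release()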
+func (b *TimestampBuilder) NewTimestampArray() (a *Timestamp) { + data := b.newData() + a = NewTimestampData(data) + data.Release() + return +} + +func (b *TimestampBuilder) newData() (data *Data) { + bytesRequired := arrow.TimestampTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *TimestampBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := arrow.TimestampFromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(v) + return nil +} + +func (b *TimestampBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + loc, _ := b.dtype.GetZone() + tm, _, err := arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc) + + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(tm) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Timestamp(n)) + case float64: + b.Append(arrow.Timestamp(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *TimestampBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *TimestampBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Time32Builder struct { + builder + + dtype *arrow.Time32Type + data *memory.Buffer + rawData []arrow.Time32 +} + +func NewTime32Builder(mem memory.Allocator, dtype *arrow.Time32Type) *Time32Builder { + return &Time32Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +func (b *Time32Builder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Time32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Time32Builder) Append(v arrow.Time32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Time32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Time32Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Time32Builder) UnsafeAppend(v arrow.Time32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Time32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Time32Builder) AppendValues(v []arrow.Time32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Time32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Time32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Time32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Time32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Time32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Time32Traits.BytesRequired(n)) + b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder +// so it can be used to build a new array. +func (b *Time32Builder) NewArray() arrow.Array { + return b.NewTime32Array() +} + +// NewTime32Array creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder +// so it can be used to build a new array. 
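+// Time32 carries seconds or milliseconds since midnight, chosen by the
+// builder's unit. Sketch (assumes the stock arrow.FixedWidthTypes table,
+// whose entries are arrow.DataType and so need a type assertion here):
+//
+//	dtype := arrow.FixedWidthTypes.Time32s.(*arrow.Time32Type)
+//	bldr := array.NewTime32Builder(memory.NewGoAllocator(), dtype)
+//	defer bldr.Release()
+//	_ = bldr.AppendValueFromString("12:34:56")
+//	arr := bldr.NewTime32Array()
+//	defer arr.Release()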
+func (b *Time32Builder) NewTime32Array() (a *Time32) { + data := b.newData() + a = NewTime32Data(data) + data.Release() + return +} + +func (b *Time32Builder) newData() (data *Data) { + bytesRequired := arrow.Time32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Time32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := arrow.Time32FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + +func (b *Time32Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + tm, err := arrow.Time32FromString(v, b.dtype.Unit) + + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Time32(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(tm) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Time32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Time32(n)) + case float64: + b.Append(arrow.Time32(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Time32(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Time32Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Time32Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Time64Builder struct { + builder + + dtype *arrow.Time64Type + data *memory.Buffer + rawData []arrow.Time64 +} + +func NewTime64Builder(mem memory.Allocator, dtype *arrow.Time64Type) *Time64Builder { + return &Time64Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +func (b *Time64Builder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Time64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Time64Builder) Append(v arrow.Time64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Time64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Time64Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Time64Builder) UnsafeAppend(v arrow.Time64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Time64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Time64Builder) AppendValues(v []arrow.Time64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Time64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Time64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Time64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Time64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Time64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Time64Traits.BytesRequired(n)) + b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder +// so it can be used to build a new array. +func (b *Time64Builder) NewArray() arrow.Array { + return b.NewTime64Array() +} + +// NewTime64Array creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder +// so it can be used to build a new array. 
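+// Time64 is the microsecond/nanosecond counterpart of Time32. A minimal
+// sketch, again assuming the stock arrow.FixedWidthTypes table:
+//
+//	dtype := arrow.FixedWidthTypes.Time64us.(*arrow.Time64Type)
+//	bldr := array.NewTime64Builder(memory.NewGoAllocator(), dtype)
+//	defer bldr.Release()
+//	bldr.Append(arrow.Time64(43200000000)) // noon, in microseconds
+//	arr := bldr.NewTime64Array()
+//	defer arr.Release()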
+func (b *Time64Builder) NewTime64Array() (a *Time64) { + data := b.newData() + a = NewTime64Data(data) + data.Release() + return +} + +func (b *Time64Builder) newData() (data *Data) { + bytesRequired := arrow.Time64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Time64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := arrow.Time64FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + +func (b *Time64Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + tm, err := arrow.Time64FromString(v, b.dtype.Unit) + + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Time64(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(tm) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Time64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Time64(n)) + case float64: + b.Append(arrow.Time64(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Time64(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Time64Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Time64Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Date32Builder struct { + builder + + data *memory.Buffer + rawData []arrow.Date32 +} + +func NewDate32Builder(mem memory.Allocator) *Date32Builder { + return &Date32Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Date32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date32 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Date32Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Date32Builder) Append(v arrow.Date32) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Date32Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Date32Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Date32Builder) UnsafeAppend(v arrow.Date32) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Date32Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. 
The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Date32Builder) AppendValues(v []arrow.Date32, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Date32Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Date32Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Date32Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Date32Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Date32Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Date32Traits.BytesRequired(n)) + b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder +// so it can be used to build a new array. +func (b *Date32Builder) NewArray() arrow.Array { + return b.NewDate32Array() +} + +// NewDate32Array creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder +// so it can be used to build a new array. 
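+// Date32 counts days since the Unix epoch, and arrow.Date32FromTime
+// performs the conversion from a time.Time. Illustrative sketch:
+//
+//	bldr := array.NewDate32Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	bldr.Append(arrow.Date32FromTime(time.Date(2024, 3, 1, 0, 0, 0, 0, time.UTC)))
+//	arr := bldr.NewDate32Array()
+//	defer arr.Release()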
+func (b *Date32Builder) NewDate32Array() (a *Date32) { + data := b.newData() + a = NewDate32Data(data) + data.Release() + return +} + +func (b *Date32Builder) newData() (data *Data) { + bytesRequired := arrow.Date32Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Date32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Date32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date32FromTime(tm)) + return nil +} + +func (b *Date32Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + tm, err := time.Parse("2006-01-02", v) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Date32(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(arrow.Date32FromTime(tm)) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Date32(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Date32(n)) + case float64: + b.Append(arrow.Date32(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Date32(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Date32Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Date32Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type Date64Builder struct { + builder + + data *memory.Buffer + rawData []arrow.Date64 +} + +func NewDate64Builder(mem memory.Allocator) *Date64Builder { + return &Date64Builder{builder: builder{refCount: 1, mem: mem}} +} + +func (b *Date64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date64 } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *Date64Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *Date64Builder) Append(v arrow.Date64) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *Date64Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *Date64Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *Date64Builder) UnsafeAppend(v arrow.Date64) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *Date64Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *Date64Builder) AppendValues(v []arrow.Date64, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.Date64Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *Date64Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.Date64Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *Date64Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *Date64Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.Date64Traits.BytesRequired(n)) + b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder +// so it can be used to build a new array. +func (b *Date64Builder) NewArray() arrow.Array { + return b.NewDate64Array() +} + +// NewDate64Array creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder +// so it can be used to build a new array. 
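+// Date64 stores milliseconds since the Unix epoch, but the JSON decoder
+// above also accepts "2006-01-02"-formatted strings. Sketch (the numeric
+// literal below is an assumed raw millisecond value for 2024-03-01 UTC):
+//
+//	bldr := array.NewDate64Builder(memory.NewGoAllocator())
+//	defer bldr.Release()
+//	_ = bldr.UnmarshalJSON([]byte(`["2024-03-01", null, 1709251200000]`))
+//	arr := bldr.NewDate64Array()
+//	defer arr.Release()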
+func (b *Date64Builder) NewDate64Array() (a *Date64) { + data := b.newData() + a = NewDate64Data(data) + data.Release() + return +} + +func (b *Date64Builder) newData() (data *Data) { + bytesRequired := arrow.Date64Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(arrow.PrimitiveTypes.Date64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *Date64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date64FromTime(tm)) + return nil +} + +func (b *Date64Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + tm, err := time.Parse("2006-01-02", v) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Date64(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(arrow.Date64FromTime(tm)) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Date64(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Date64(n)) + case float64: + b.Append(arrow.Date64(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Date64(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *Date64Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *Date64Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type DurationBuilder struct { + builder + + dtype *arrow.DurationType + data *memory.Buffer + rawData []arrow.Duration +} + +func NewDurationBuilder(mem memory.Allocator, dtype *arrow.DurationType) *DurationBuilder { + return &DurationBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +func (b *DurationBuilder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *DurationBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *DurationBuilder) Append(v arrow.Duration) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *DurationBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *DurationBuilder) AppendEmptyValue() { + b.Append(0) +} + +func (b *DurationBuilder) UnsafeAppend(v arrow.Duration) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *DurationBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *DurationBuilder) AppendValues(v []arrow.Duration, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.DurationTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *DurationBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.DurationTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *DurationBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *DurationBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.DurationTraits.BytesRequired(n)) + b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder +// so it can be used to build a new array. +func (b *DurationBuilder) NewArray() arrow.Array { + return b.NewDurationArray() +} + +// NewDurationArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder +// so it can be used to build a new array. 
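+// Duration's JSON decoder is lenient: numbers are raw counts in the
+// builder's unit, while strings such as "1h30m" go through
+// time.ParseDuration and are rescaled to that unit. Sketch:
+//
+//	dtype := &arrow.DurationType{Unit: arrow.Millisecond}
+//	bldr := array.NewDurationBuilder(memory.NewGoAllocator(), dtype)
+//	defer bldr.Release()
+//	_ = bldr.UnmarshalJSON([]byte(`["1h30m", 2500, null]`)) // 5400000ms, 2500ms, null
+//	arr := bldr.NewDurationArray()
+//	defer arr.Release()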
+func (b *DurationBuilder) NewDurationArray() (a *Duration) { + data := b.newData() + a = NewDurationData(data) + data.Release() + return +} + +func (b *DurationBuilder) newData() (data *Data) { + bytesRequired := arrow.DurationTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *DurationBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + return fmt.Errorf("%w: AppendValueFromString not implemented for Duration", arrow.ErrNotImplemented) + return nil +} + +func (b *DurationBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Duration(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Duration(n)) + case float64: + b.Append(arrow.Duration(v)) + case string: + // be flexible for specifying durations by accepting forms like + // 3h2m0.5s regardless of the unit and converting it to the proper + // precision. + val, err := time.ParseDuration(v) + if err != nil { + // if we got an error, maybe it was because the attempt to create + // a time.Duration (int64) in nanoseconds would overflow. check if + // the string is just a large number followed by the unit suffix + if strings.HasSuffix(v, b.dtype.Unit.String()) { + value, err := strconv.ParseInt(v[:len(v)-len(b.dtype.Unit.String())], 10, 64) + if err == nil { + b.Append(arrow.Duration(value)) + break + } + } + + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Duration(0)), + Offset: dec.InputOffset(), + } + } + + switch b.dtype.Unit { + case arrow.Nanosecond: + b.Append(arrow.Duration(val.Nanoseconds())) + case arrow.Microsecond: + b.Append(arrow.Duration(val.Microseconds())) + case arrow.Millisecond: + b.Append(arrow.Duration(val.Milliseconds())) + case arrow.Second: + b.Append(arrow.Duration(val.Seconds())) + } + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Duration(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *DurationBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *DurationBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ Builder = (*Int64Builder)(nil) + _ Builder = (*Uint64Builder)(nil) + _ Builder = (*Float64Builder)(nil) + _ Builder = (*Int32Builder)(nil) + _ Builder = (*Uint32Builder)(nil) + _ Builder = (*Float32Builder)(nil) + _ Builder = (*Int16Builder)(nil) + _ Builder = (*Uint16Builder)(nil) + _ Builder = (*Int8Builder)(nil) + _ Builder = (*Uint8Builder)(nil) + _ Builder = (*TimestampBuilder)(nil) + _ Builder = (*Time32Builder)(nil) + _ Builder = (*Time64Builder)(nil) + _ Builder = (*Date32Builder)(nil) + _ Builder = 
(*Date64Builder)(nil) + _ Builder = (*DurationBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl new file mode 100644 index 00000000..37781538 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl @@ -0,0 +1,440 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +{{range .In}} + +type {{.Name}}Builder struct { + builder + +{{if .Opt.Parametric -}} + dtype *arrow.{{.Name}}Type +{{end -}} + data *memory.Buffer + rawData []{{or .QualifiedType .Type}} +} + +{{if .Opt.Parametric}} +func New{{.Name}}Builder(mem memory.Allocator, dtype *arrow.{{.Name}}Type) *{{.Name}}Builder { + return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}, dtype: dtype} +} + +func (b *{{.Name}}Builder) Type() arrow.DataType { return b.dtype } + +{{else}} +func New{{.Name}}Builder(mem memory.Allocator) *{{.Name}}Builder { + return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}} +} + +func (b *{{.Name}}Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.{{.Name}} } +{{end}} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *{{.Name}}Builder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *{{.Name}}Builder) Append(v {{or .QualifiedType .Type}}) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *{{.Name}}Builder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *{{.Name}}Builder) AppendEmptyValue() { + b.Append(0) +} + +func (b *{{.Name}}Builder) UnsafeAppend(v {{or .QualifiedType .Type}}) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *{{.Name}}Builder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. 
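+// +// Editor's note (not upstream): for a generated builder such as Int64Builder this looks like: +// b := array.NewInt64Builder(mem) +// b.AppendValues([]int64{1, 2, 3}, []bool{true, false, true}) // the middle value is null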
+func (b *{{.Name}}Builder) AppendValues(v []{{or .QualifiedType .Type}}, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.{{.Name}}Traits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *{{.Name}}Builder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.{{.Name}}Traits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *{{.Name}}Builder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *{{.Name}}Builder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.{{.Name}}Traits.BytesRequired(n)) + b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder +// so it can be used to build a new array. +func (b *{{.Name}}Builder) NewArray() arrow.Array { + return b.New{{.Name}}Array() +} + +// New{{.Name}}Array creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder +// so it can be used to build a new array. 
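+// +// Editor's note (not upstream): because the builder is reset, a second immediate call to New{{.Name}}Array yields an empty array, not a copy.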
+func (b *{{.Name}}Builder) New{{.Name}}Array() (a *{{.Name}}) { + data := b.newData() + a = New{{.Name}}Data(data) + data.Release() + return +} + +func (b *{{.Name}}Builder) newData() (data *Data) { + bytesRequired := arrow.{{.Name}}Traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } +{{if .Opt.Parametric -}} + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) +{{else -}} + data = NewData(arrow.PrimitiveTypes.{{.Name}}, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) +{{end -}} + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *{{.Name}}Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + {{if or (eq .Name "Date32") -}} + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date32FromTime(tm)) + {{else if or (eq .Name "Date64") -}} + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date64FromTime(tm)) + {{else if or (eq .Name "Time32") -}} + val, err := arrow.Time32FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + {{else if or (eq .Name "Time64") -}} + val, err := arrow.Time64FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + {{else if or (eq .Name "Timestamp") -}} + v, err := arrow.TimestampFromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(v) + {{else if (eq .Name "Duration") -}} + return fmt.Errorf("%w: AppendValueFromString not implemented for Duration", arrow.ErrNotImplemented) + {{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}} + v, err := strconv.ParseInt(s, 10, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}} + v, err := strconv.ParseUint(s, 10, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{else if or (eq .Name "Float32") (eq .Name "Float64") -}} + v, err := strconv.ParseFloat(s, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{else}} + return fmt.Errorf("%w: AppendValueFromString not implemented for {{.Name}}", ErrNotImplemented) + {{end -}} + return nil +} + +func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() +{{if or (eq .Name "Date32") (eq .Name "Date64") -}} + case string: + tm, err := time.Parse("2006-01-02", v) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + + b.Append({{.QualifiedType}}FromTime(tm)) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + b.Append({{.QualifiedType}}(n)) + case float64: + b.Append({{.QualifiedType}}(v)) +{{else if or (eq .Name "Time32") (eq .Name "Time64") (eq .Name "Timestamp") -}} + case string: +{{if (eq .Name "Timestamp") -}} + loc, _ := b.dtype.GetZone() + tm, _, err := 
arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc) +{{else -}} + tm, err := {{.QualifiedType}}FromString(v, b.dtype.Unit) +{{end}} + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(tm) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + b.Append({{.QualifiedType}}(n)) + case float64: + b.Append({{.QualifiedType}}(v)) +{{else if eq .Name "Duration" -}} + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + b.Append({{.QualifiedType}}(n)) + case float64: + b.Append({{.QualifiedType}}(v)) + case string: + // be flexible for specifying durations by accepting forms like + // 3h2m0.5s regardless of the unit and converting it to the proper + // precision. + val, err := time.ParseDuration(v) + if err != nil { + // if we got an error, maybe it was because the attempt to create + // a time.Duration (int64) in nanoseconds would overflow. check if + // the string is just a large number followed by the unit suffix + if strings.HasSuffix(v, b.dtype.Unit.String()) { + value, err := strconv.ParseInt(v[:len(v)-len(b.dtype.Unit.String())], 10, 64) + if err == nil { + b.Append(arrow.Duration(value)) + break + } + } + + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf({{.QualifiedType}}(0)), + Offset: dec.InputOffset(), + } + } + + switch b.dtype.Unit { + case arrow.Nanosecond: + b.Append({{.QualifiedType}}(val.Nanoseconds())) + case arrow.Microsecond: + b.Append({{.QualifiedType}}(val.Microseconds())) + case arrow.Millisecond: + b.Append({{.QualifiedType}}(val.Milliseconds())) + case arrow.Second: + b.Append({{.QualifiedType}}(val.Seconds())) + } +{{else}} + case string: +{{if or (eq .Name "Float32") (eq .Name "Float64") -}} + f, err := strconv.ParseFloat(v, {{.Size}}*8) +{{else if eq (printf "%.1s" .Name) "U" -}} + f, err := strconv.ParseUint(v, 10, {{.Size}}*8) +{{else -}} + f, err := strconv.ParseInt(v, 10, {{.Size}}*8) +{{end -}} + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf({{.name}}(0)), + Offset: dec.InputOffset(), + } + } + b.Append({{.name}}(f)) + case float64: + b.Append({{.name}}(v)) + case json.Number: +{{if or (eq .Name "Float32") (eq .Name "Float64") -}} + f, err := strconv.ParseFloat(v.String(), {{.Size}}*8) +{{else if eq (printf "%.1s" .Name) "U" -}} + f, err := strconv.ParseUint(v.String(), 10, {{.Size}}*8) +{{else -}} + f, err := strconv.ParseInt(v.String(), 10, {{.Size}}*8) +{{end -}} + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf({{.name}}(0)), + Offset: dec.InputOffset(), + } + } + b.Append({{.name}}(f)) +{{end}} + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf({{or .QualifiedType .Type}}(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *{{.Name}}Builder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *{{.Name}}Builder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || 
delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} +{{end}} + +var ( +{{- range .In}} + _ Builder = (*{{.Name}}Builder)(nil) +{{- end}} +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl new file mode 100644 index 00000000..996a8f5e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl @@ -0,0 +1,217 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array_test + +import ( + "testing" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/stretchr/testify/assert" +) + +{{range .In}} +func TestNew{{.Name}}Builder(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + ab.Retain() + ab.Release() + + ab.Append(1) + ab.Append(2) + ab.Append(3) + ab.AppendNull() + ab.Append(5) + ab.Append(6) + ab.AppendNull() + ab.Append(8) + ab.Append(9) + ab.Append(10) + ab.AppendValueFromString("11") + + // check state of builder before New{{.Name}}Array + assert.Equal(t, 11, ab.Len(), "unexpected Len()") + assert.Equal(t, 2, ab.NullN(), "unexpected NullN()") + + a := ab.New{{.Name}}Array() + + // check state of builder after New{{.Name}}Array + assert.Zero(t, ab.Len(), "unexpected ArrayBuilder.Len(), New{{.Name}}Array did not reset state") + assert.Zero(t, ab.Cap(), "unexpected ArrayBuilder.Cap(), New{{.Name}}Array did not reset state") + assert.Zero(t, ab.NullN(), "unexpected ArrayBuilder.NullN(), New{{.Name}}Array did not reset state") + + // check state of array + assert.Equal(t, 2, a.NullN(), "unexpected null count") + assert.Equal(t, []{{or .QualifiedType .Type}}{1, 2, 3, 0, 5, 6, 0, 8, 9, 10, 11}, a.{{.Name}}Values(), "unexpected {{.Name}}Values") + assert.Equal(t, []byte{0xb7}, a.NullBitmapBytes()[:1]) // 4 bytes due to minBuilderCapacity + assert.Len(t, a.{{.Name}}Values(), 11, "unexpected length of {{.Name}}Values") + + a.Release() + + ab.Append(7) + ab.Append(8) + + a = ab.New{{.Name}}Array() + + assert.Equal(t, 0, a.NullN()) + assert.Equal(t, []{{or .QualifiedType .Type}}{7, 8}, a.{{.Name}}Values()) + assert.Len(t, a.{{.Name}}Values(), 2) + + a.Release() + + var ( + want = []{{or .QualifiedType .Type}}{1, 2, 3, 4} + valids = []bool{true, true, false, true} + ) + + ab.AppendValues(want, valids) + a = ab.New{{.Name}}Array() + + sub := 
array.MakeFromData(a.Data()) + defer sub.Release() + + if got, want := sub.DataType().ID(), a.DataType().ID(); got != want { + t.Fatalf("invalid type: got=%q, want=%q", got, want) + } + + if _, ok := sub.(*array.{{.Name}}); !ok { + t.Fatalf("could not type-assert to array.{{.Name}}") + } + + if got, want := a.String(), `[1 2 (null) 4]`; got != want { + t.Fatalf("got=%q, want=%q", got, want) + } + + slice := array.NewSliceData(a.Data(), 2, 4) + defer slice.Release() + + sub1 := array.MakeFromData(slice) + defer sub1.Release() + + v, ok := sub1.(*array.{{.Name}}) + if !ok { + t.Fatalf("could not type-assert to array.{{.Name}}") + } + + if got, want := v.String(), `[(null) 4]`; got != want { + t.Fatalf("got=%q, want=%q", got, want) + } + + a.Release() +} + +func Test{{.Name}}Builder_AppendValues(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3} + ab.AppendValues(exp, nil) + a := ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + + a.Release() +} + +func Test{{.Name}}Builder_Empty(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3} + + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + a := ab.New{{.Name}}Array() + assert.Zero(t, a.Len()) + a.Release() + + ab.AppendValues(nil, nil) + a = ab.New{{.Name}}Array() + assert.Zero(t, a.Len()) + a.Release() + + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + ab.AppendValues(exp, nil) + a = ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + a.Release() + + ab.AppendValues(exp, nil) + ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil) + a = ab.New{{.Name}}Array() + assert.Equal(t, exp, a.{{.Name}}Values()) + a.Release() +} + +func Test{{.Name}}Builder_Resize(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} + dtype := &arrow.{{.Name}}Type{Unit: arrow.Second} + ab := array.New{{.Name}}Builder(mem, dtype) +{{else}} + ab := array.New{{.Name}}Builder(mem) +{{end -}} + defer ab.Release() + + assert.Equal(t, 0, ab.Cap()) + assert.Equal(t, 0, ab.Len()) + + ab.Reserve(63) + assert.Equal(t, 64, ab.Cap()) + assert.Equal(t, 0, ab.Len()) + + for i := 0; i < 63; i++ { + ab.Append(0) + } + assert.Equal(t, 64, ab.Cap()) + assert.Equal(t, 63, ab.Len()) + + ab.Resize(5) + assert.Equal(t, 5, ab.Len()) + + ab.Resize(32) + assert.Equal(t, 5, ab.Len()) +} +{{end}} + + diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/record.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/record.go new file mode 100644 index 00000000..b00a59d1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/record.go @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// RecordReader reads a stream of records. +type RecordReader interface { + Retain() + Release() + + Schema() *arrow.Schema + + Next() bool + Record() arrow.Record + Err() error +} + +// simpleRecords is a simple iterator over a collection of records. +type simpleRecords struct { + refCount int64 + + schema *arrow.Schema + recs []arrow.Record + cur arrow.Record +} + +// NewRecordReader returns a simple iterator over the given slice of records. +func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (*simpleRecords, error) { + rs := &simpleRecords{ + refCount: 1, + schema: schema, + recs: recs, + cur: nil, + } + + for _, rec := range rs.recs { + rec.Retain() + } + + for _, rec := range recs { + if !rec.Schema().Equal(rs.schema) { + rs.Release() + return nil, fmt.Errorf("arrow/array: mismatch schema") + } + } + + return rs, nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (rs *simpleRecords) Retain() { + atomic.AddInt64(&rs.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (rs *simpleRecords) Release() { + debug.Assert(atomic.LoadInt64(&rs.refCount) > 0, "too many releases") + + if atomic.AddInt64(&rs.refCount, -1) == 0 { + if rs.cur != nil { + rs.cur.Release() + } + for _, rec := range rs.recs { + rec.Release() + } + rs.recs = nil + } +} + +func (rs *simpleRecords) Schema() *arrow.Schema { return rs.schema } +func (rs *simpleRecords) Record() arrow.Record { return rs.cur } +func (rs *simpleRecords) Next() bool { + if len(rs.recs) == 0 { + return false + } + if rs.cur != nil { + rs.cur.Release() + } + rs.cur = rs.recs[0] + rs.recs = rs.recs[1:] + return true +} +func (rs *simpleRecords) Err() error { return nil } + +// simpleRecord is a basic, non-lazy in-memory record batch. +type simpleRecord struct { + refCount int64 + + schema *arrow.Schema + + rows int64 + arrs []arrow.Array +} + +// NewRecord returns a basic, non-lazy in-memory record batch. +// +// NewRecord panics if the columns and schema are inconsistent. +// NewRecord panics if rows is larger than the height of the columns. 
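+// +// Editor's sketch (not upstream), assuming schema has a single field matching col: +// rec := array.NewRecord(schema, []arrow.Array{col}, -1) // -1 infers the row count from the columns +// defer rec.Release()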
+func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) *simpleRecord { + rec := &simpleRecord{ + refCount: 1, + schema: schema, + rows: nrows, + arrs: make([]arrow.Array, len(cols)), + } + copy(rec.arrs, cols) + for _, arr := range rec.arrs { + arr.Retain() + } + + if rec.rows < 0 { + switch len(rec.arrs) { + case 0: + rec.rows = 0 + default: + rec.rows = int64(rec.arrs[0].Len()) + } + } + + err := rec.validate() + if err != nil { + rec.Release() + panic(err) + } + + return rec +} + +func (rec *simpleRecord) SetColumn(i int, arr arrow.Array) (arrow.Record, error) { + if i < 0 || i >= len(rec.arrs) { + return nil, fmt.Errorf("arrow/array: column index out of range [0, %d): got=%d", len(rec.arrs), i) + } + + if arr.Len() != int(rec.rows) { + return nil, fmt.Errorf("arrow/array: mismatch number of rows in column %q: got=%d, want=%d", + rec.schema.Field(i).Name, + arr.Len(), rec.rows, + ) + } + + f := rec.schema.Field(i) + if !arrow.TypeEqual(f.Type, arr.DataType()) { + return nil, fmt.Errorf("arrow/array: column %q type mismatch: got=%v, want=%v", + f.Name, + arr.DataType(), f.Type, + ) + } + arrs := make([]arrow.Array, len(rec.arrs)) + copy(arrs, rec.arrs) + arrs[i] = arr + + return NewRecord(rec.schema, arrs, rec.rows), nil +} + +func (rec *simpleRecord) validate() error { + if rec.rows == 0 && len(rec.arrs) == 0 { + return nil + } + + if len(rec.arrs) != len(rec.schema.Fields()) { + return fmt.Errorf("arrow/array: number of columns/fields mismatch") + } + + for i, arr := range rec.arrs { + f := rec.schema.Field(i) + if int64(arr.Len()) < rec.rows { + return fmt.Errorf("arrow/array: mismatch number of rows in column %q: got=%d, want=%d", + f.Name, + arr.Len(), rec.rows, + ) + } + if !arrow.TypeEqual(f.Type, arr.DataType()) { + return fmt.Errorf("arrow/array: column %q type mismatch: got=%v, want=%v", + f.Name, + arr.DataType(), f.Type, + ) + } + } + return nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (rec *simpleRecord) Retain() { + atomic.AddInt64(&rec.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (rec *simpleRecord) Release() { + debug.Assert(atomic.LoadInt64(&rec.refCount) > 0, "too many releases") + + if atomic.AddInt64(&rec.refCount, -1) == 0 { + for _, arr := range rec.arrs { + arr.Release() + } + rec.arrs = nil + } +} + +func (rec *simpleRecord) Schema() *arrow.Schema { return rec.schema } +func (rec *simpleRecord) NumRows() int64 { return rec.rows } +func (rec *simpleRecord) NumCols() int64 { return int64(len(rec.arrs)) } +func (rec *simpleRecord) Columns() []arrow.Array { return rec.arrs } +func (rec *simpleRecord) Column(i int) arrow.Array { return rec.arrs[i] } +func (rec *simpleRecord) ColumnName(i int) string { return rec.schema.Field(i).Name } + +// NewSlice constructs a zero-copy slice of the record with the indicated +// indices i and j, corresponding to array[i:j]. +// The returned record must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the record array. +// NewSlice panics if j < i. 
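+// +// Editor's sketch (not upstream), taking rows [2, 5) of a record: +// sub := rec.NewSlice(2, 5) +// defer sub.Release()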
+func (rec *simpleRecord) NewSlice(i, j int64) arrow.Record { + arrs := make([]arrow.Array, len(rec.arrs)) + for ii, arr := range rec.arrs { + arrs[ii] = NewSlice(arr, i, j) + } + defer func() { + for _, arr := range arrs { + arr.Release() + } + }() + return NewRecord(rec.schema, arrs, j-i) +} + +func (rec *simpleRecord) String() string { + o := new(strings.Builder) + fmt.Fprintf(o, "record:\n %v\n", rec.schema) + fmt.Fprintf(o, " rows: %d\n", rec.rows) + for i, col := range rec.arrs { + fmt.Fprintf(o, " col[%d][%s]: %v\n", i, rec.schema.Field(i).Name, col) + } + + return o.String() +} + +func (rec *simpleRecord) MarshalJSON() ([]byte, error) { + arr := RecordToStructArray(rec) + defer arr.Release() + return arr.MarshalJSON() +} + +// RecordBuilder eases the process of building a Record, iteratively, from +// a known Schema. +type RecordBuilder struct { + refCount int64 + mem memory.Allocator + schema *arrow.Schema + fields []Builder +} + +// NewRecordBuilder returns a builder, using the provided memory allocator and a schema. +func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder { + b := &RecordBuilder{ + refCount: 1, + mem: mem, + schema: schema, + fields: make([]Builder, len(schema.Fields())), + } + + for i, f := range schema.Fields() { + b.fields[i] = NewBuilder(b.mem, f.Type) + } + + return b +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (b *RecordBuilder) Retain() { + atomic.AddInt64(&b.refCount, 1) +} + +// Release decreases the reference count by 1. +func (b *RecordBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + for _, f := range b.fields { + f.Release() + } + b.fields = nil + } +} + +func (b *RecordBuilder) Schema() *arrow.Schema { return b.schema } +func (b *RecordBuilder) Fields() []Builder { return b.fields } +func (b *RecordBuilder) Field(i int) Builder { return b.fields[i] } + +func (b *RecordBuilder) Reserve(size int) { + for _, f := range b.fields { + f.Reserve(size) + } +} + +// NewRecord creates a new record from the memory buffers and resets the +// RecordBuilder so it can be used to build a new record. +// +// The returned Record must be Release()'d after use. +// +// NewRecord panics if the fields' builder do not have the same length. +func (b *RecordBuilder) NewRecord() arrow.Record { + cols := make([]arrow.Array, len(b.fields)) + rows := int64(0) + + defer func(cols []arrow.Array) { + for _, col := range cols { + if col == nil { + continue + } + col.Release() + } + }(cols) + + for i, f := range b.fields { + cols[i] = f.NewArray() + irow := int64(cols[i].Len()) + if i > 0 && irow != rows { + panic(fmt.Errorf("arrow/array: field %d has %d rows. want=%d", i, irow, rows)) + } + rows = irow + } + + return NewRecord(b.schema, cols, rows) +} + +// UnmarshalJSON for record builder will read in a single object and add the values +// to each field in the recordbuilder, missing fields will get a null and unexpected +// keys will be ignored. If reading in an array of records as a single batch, then use +// a structbuilder and use RecordFromStruct. 
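+// +// Editor's sketch (not upstream), assuming the schema has an int64 field "x": +// b := array.NewRecordBuilder(mem, schema) +// defer b.Release() +// err := b.UnmarshalJSON([]byte(`{"x": 1}`)) // appends one row; fields other than "x" get null +// rec := b.NewRecord() +// defer rec.Release()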
+func (b *RecordBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + // should start with a '{' + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '{' { + return fmt.Errorf("record should start with '{', not %s", t) + } + + keylist := make(map[string]bool) + for dec.More() { + keyTok, err := dec.Token() + if err != nil { + return err + } + + key := keyTok.(string) + if keylist[key] { + return fmt.Errorf("key %s shows up twice in row to be decoded", key) + } + keylist[key] = true + + indices := b.schema.FieldIndices(key) + if len(indices) == 0 { + var extra interface{} + if err := dec.Decode(&extra); err != nil { + return err + } + continue + } + + if err := b.fields[indices[0]].UnmarshalOne(dec); err != nil { + return err + } + } + + for i, f := range b.schema.Fields() { + if !keylist[f.Name] { + b.fields[i].AppendNull() + } + } + return nil +} + +var ( + _ arrow.Record = (*simpleRecord)(nil) + _ RecordReader = (*simpleRecords)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/string.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/string.go new file mode 100644 index 00000000..20a67f25 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/string.go @@ -0,0 +1,511 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// String represents an immutable sequence of variable-length UTF-8 strings. +type String struct { + array + offsets []int32 + values string +} + +// NewStringData constructs a new String array from data. +func NewStringData(data arrow.ArrayData) *String { + a := &String{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the String with a different set of Data. +func (a *String) Reset(data arrow.ArrayData) { + a.setData(data.(*Data)) +} + +// Value returns the slice at index i. This value should not be mutated. +func (a *String) Value(i int) string { + i = i + a.array.data.offset + return a.values[a.offsets[i]:a.offsets[i+1]] +} +func (a *String) ValueStr(i int) string { + if a.IsNull(i) { + return "(null)" + } else { + return a.Value(i) + } +} + +// ValueOffset returns the offset of the value at index i. 
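+// +// Editor's note (not upstream): for values ["ab", "c"] the offsets are [0, 2, 3], so ValueOffset(0) == 0, ValueOffset(1) == 2 and ValueOffset(2) == 3.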
+func (a *String) ValueOffset(i int) int { + if i < 0 || i > a.array.data.length { + panic("arrow/array: index out of range") + } + return int(a.offsets[i+a.array.data.offset]) +} + +func (a *String) ValueOffset64(i int) int64 { + return int64(a.ValueOffset(i)) +} + +func (a *String) ValueOffsets() []int32 { + beg := a.array.data.offset + end := beg + a.array.data.length + 1 + return a.offsets[beg:end] +} + +func (a *String) ValueBytes() (ret []byte) { + beg := a.array.data.offset + end := beg + a.array.data.length + data := a.values[a.offsets[beg]:a.offsets[end]] + + s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) + s.Data = (*reflect.StringHeader)(unsafe.Pointer(&data)).Data + s.Len = len(data) + s.Cap = len(data) + return +} + +func (a *String) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%q", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *String) setData(data *Data) { + if len(data.buffers) != 3 { + panic("arrow/array: len(data.buffers) != 3") + } + + a.array.setData(data) + + if vdata := data.buffers[2]; vdata != nil { + b := vdata.Bytes() + a.values = *(*string)(unsafe.Pointer(&b)) + } + + if offsets := data.buffers[1]; offsets != nil { + a.offsets = arrow.Int32Traits.CastFromBytes(offsets.Bytes()) + } + + if a.array.data.length < 1 { + return + } + + expNumOffsets := a.array.data.offset + a.array.data.length + 1 + if len(a.offsets) < expNumOffsets { + panic(fmt.Errorf("arrow/array: string offset buffer must have at least %d values", expNumOffsets)) + } + + if int(a.offsets[expNumOffsets-1]) > len(a.values) { + panic("arrow/array: string offsets out of bounds of data buffer") + } +} + +func (a *String) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.Value(i) + } + return nil +} + +func (a *String) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { + vals[i] = a.Value(i) + } else { + vals[i] = nil + } + } + return json.Marshal(vals) +} + +func arrayEqualString(left, right *String) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// String represents an immutable sequence of variable-length UTF-8 strings. +type LargeString struct { + array + offsets []int64 + values string +} + +// NewStringData constructs a new String array from data. +func NewLargeStringData(data arrow.ArrayData) *LargeString { + a := &LargeString{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the String with a different set of Data. +func (a *LargeString) Reset(data arrow.ArrayData) { + a.setData(data.(*Data)) +} + +// Value returns the slice at index i. This value should not be mutated. +func (a *LargeString) Value(i int) string { + i = i + a.array.data.offset + return a.values[a.offsets[i]:a.offsets[i+1]] +} +func (a *LargeString) ValueStr(i int) string { return a.Value(i) } + +// ValueOffset returns the offset of the value at index i. 
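+// +// Editor's note (not upstream): LargeString differs from String only in its int64 offsets, which allow more than 2 GiB of total character data per array.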
+func (a *LargeString) ValueOffset(i int) int64 { + if i < 0 || i > a.array.data.length { + panic("arrow/array: index out of range") + } + return a.offsets[i+a.array.data.offset] +} + +func (a *LargeString) ValueOffset64(i int) int64 { + return a.ValueOffset(i) +} + +func (a *LargeString) ValueOffsets() []int64 { + beg := a.array.data.offset + end := beg + a.array.data.length + 1 + return a.offsets[beg:end] +} + +func (a *LargeString) ValueBytes() (ret []byte) { + beg := a.array.data.offset + end := beg + a.array.data.length + data := a.values[a.offsets[beg]:a.offsets[end]] + + s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) + s.Data = (*reflect.StringHeader)(unsafe.Pointer(&data)).Data + s.Len = len(data) + s.Cap = len(data) + return +} + +func (a *LargeString) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString("(null)") + default: + fmt.Fprintf(o, "%q", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *LargeString) setData(data *Data) { + if len(data.buffers) != 3 { + panic("arrow/array: len(data.buffers) != 3") + } + + a.array.setData(data) + + if vdata := data.buffers[2]; vdata != nil { + b := vdata.Bytes() + a.values = *(*string)(unsafe.Pointer(&b)) + } + + if offsets := data.buffers[1]; offsets != nil { + a.offsets = arrow.Int64Traits.CastFromBytes(offsets.Bytes()) + } + + if a.array.data.length < 1 { + return + } + + expNumOffsets := a.array.data.offset + a.array.data.length + 1 + if len(a.offsets) < expNumOffsets { + panic(fmt.Errorf("arrow/array: string offset buffer must have at least %d values", expNumOffsets)) + } + + if int(a.offsets[expNumOffsets-1]) > len(a.values) { + panic("arrow/array: string offsets out of bounds of data buffer") + } +} + +func (a *LargeString) GetOneForMarshal(i int) interface{} { + if a.IsValid(i) { + return a.Value(i) + } + return nil +} + +func (a *LargeString) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualLargeString(left, right *LargeString) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +// A StringBuilder is used to build a String array using the Append methods. +type StringBuilder struct { + *BinaryBuilder +} + +// NewStringBuilder creates a new StringBuilder. +func NewStringBuilder(mem memory.Allocator) *StringBuilder { + b := &StringBuilder{ + BinaryBuilder: NewBinaryBuilder(mem, arrow.BinaryTypes.String), + } + return b +} + +func (b *StringBuilder) Type() arrow.DataType { return arrow.BinaryTypes.String } + +// Append appends a string to the builder. +func (b *StringBuilder) Append(v string) { + b.BinaryBuilder.Append([]byte(v)) +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *StringBuilder) AppendValues(v []string, valid []bool) { + b.BinaryBuilder.AppendStringValues(v, valid) +} + +// Value returns the string at index i. 
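+// +// Editor's sketch (not upstream): +// sb := array.NewStringBuilder(mem) +// sb.Append("hello") +// s := sb.Value(0) // "hello", readable before the array is built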
+func (b *StringBuilder) Value(i int) string { + return string(b.BinaryBuilder.Value(i)) +} + +// func (b *StringBuilder) UnsafeAppend(v string) { +// b.BinaryBuilder.UnsafeAppend([]byte(v)) +// } + +// NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. +func (b *StringBuilder) NewArray() arrow.Array { + return b.NewStringArray() +} + +// NewStringArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. +func (b *StringBuilder) NewStringArray() (a *String) { + data := b.newData() + a = NewStringData(data) + data.Release() + return +} + +func (b *StringBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + b.Append(v) + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(v), + Type: reflect.TypeOf(string("")), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *StringBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *StringBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("string builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +// A LargeStringBuilder is used to build a LargeString array using the Append methods. +// LargeString is for when you need the offset buffer to be 64-bit integers +// instead of 32-bit integers. +type LargeStringBuilder struct { + *BinaryBuilder +} + +// NewStringBuilder creates a new StringBuilder. +func NewLargeStringBuilder(mem memory.Allocator) *LargeStringBuilder { + b := &LargeStringBuilder{ + BinaryBuilder: NewBinaryBuilder(mem, arrow.BinaryTypes.LargeString), + } + return b +} + +func (b *LargeStringBuilder) Type() arrow.DataType { return arrow.BinaryTypes.LargeString } + +// Append appends a string to the builder. +func (b *LargeStringBuilder) Append(v string) { + b.BinaryBuilder.Append([]byte(v)) +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *LargeStringBuilder) AppendValues(v []string, valid []bool) { + b.BinaryBuilder.AppendStringValues(v, valid) +} + +// Value returns the string at index i. +func (b *LargeStringBuilder) Value(i int) string { + return string(b.BinaryBuilder.Value(i)) +} + +// func (b *LargeStringBuilder) UnsafeAppend(v string) { +// b.BinaryBuilder.UnsafeAppend([]byte(v)) +// } + +// NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. +func (b *LargeStringBuilder) NewArray() arrow.Array { + return b.NewLargeStringArray() +} + +// NewStringArray creates a String array from the memory buffers used by the builder and resets the StringBuilder +// so it can be used to build a new array. 
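+// +// Editor's sketch (not upstream): +// lsb := array.NewLargeStringBuilder(mem) +// lsb.Append("hello") +// arr := lsb.NewLargeStringArray() +// defer arr.Release()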
+func (b *LargeStringBuilder) NewLargeStringArray() (a *LargeString) { + data := b.newData() + a = NewLargeStringData(data) + data.Release() + return +} + +func (b *LargeStringBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + b.Append(v) + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(v), + Type: reflect.TypeOf(string("")), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *LargeStringBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *LargeStringBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("string builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +type StringLikeBuilder interface { + Builder + Append(string) + UnsafeAppend([]byte) + ReserveData(int) +} + +var ( + _ arrow.Array = (*String)(nil) + _ arrow.Array = (*LargeString)(nil) + _ Builder = (*StringBuilder)(nil) + _ Builder = (*LargeStringBuilder)(nil) + _ StringLikeBuilder = (*StringBuilder)(nil) + _ StringLikeBuilder = (*LargeStringBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go new file mode 100644 index 00000000..77fb3685 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go @@ -0,0 +1,462 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "errors" + "fmt" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/goccy/go-json" +) + +// Struct represents an ordered sequence of relative types. +type Struct struct { + array + fields []arrow.Array +} + +// NewStructArray constructs a new Struct Array out of the columns passed +// in and the field names. The length of all cols must be the same and +// there should be the same number of columns as names. +func NewStructArray(cols []arrow.Array, names []string) (*Struct, error) { + return NewStructArrayWithNulls(cols, names, nil, 0, 0) +} + +// NewStructArrayWithNulls is like NewStructArray as a convenience function, +// but also takes in a null bitmap, the number of nulls, and an optional offset +// to use for creating the Struct Array. 
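+// +// Editor's sketch (not upstream), assuming ints and strs are equal-length arrow.Arrays: +// st, err := array.NewStructArray([]arrow.Array{ints, strs}, []string{"a", "b"})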
+func NewStructArrayWithNulls(cols []arrow.Array, names []string, nullBitmap *memory.Buffer, nullCount int, offset int) (*Struct, error) { + if len(cols) != len(names) { + return nil, fmt.Errorf("%w: mismatching number of fields and child arrays", arrow.ErrInvalid) + } + if len(cols) == 0 { + return nil, fmt.Errorf("%w: can't infer struct array length with 0 child arrays", arrow.ErrInvalid) + } + length := cols[0].Len() + children := make([]arrow.ArrayData, len(cols)) + fields := make([]arrow.Field, len(cols)) + for i, c := range cols { + if length != c.Len() { + return nil, fmt.Errorf("%w: mismatching child array lengths", arrow.ErrInvalid) + } + children[i] = c.Data() + fields[i].Name = names[i] + fields[i].Type = c.DataType() + fields[i].Nullable = true + } + data := NewData(arrow.StructOf(fields...), length, []*memory.Buffer{nullBitmap}, children, nullCount, offset) + defer data.Release() + return NewStructData(data), nil +} + +// NewStructData returns a new Struct array value from data. +func NewStructData(data arrow.ArrayData) *Struct { + a := &Struct{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *Struct) NumField() int { return len(a.fields) } +func (a *Struct) Field(i int) arrow.Array { return a.fields[i] } + +// ValueStr returns the string representation (as json) of the value at index i. +func (a *Struct) ValueStr(i int) string { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + panic(err) + } + return buf.String() +} + +func (a *Struct) String() string { + o := new(strings.Builder) + o.WriteString("{") + + structBitmap := a.NullBitmapBytes() + for i, v := range a.fields { + if i > 0 { + o.WriteString(" ") + } + if arrow.IsUnion(v.DataType().ID()) { + fmt.Fprintf(o, "%v", v) + continue + } else if !bytes.Equal(structBitmap, v.NullBitmapBytes()) { + masked := a.newStructFieldWithParentValidityMask(i) + fmt.Fprintf(o, "%v", masked) + masked.Release() + continue + } + fmt.Fprintf(o, "%v", v) + } + o.WriteString("}") + return o.String() +} + +// newStructFieldWithParentValidityMask returns the Interface at fieldIndex +// with a nullBitmapBytes adjusted according on the parent struct nullBitmapBytes. +// From the docs: +// "When reading the struct array the parent validity bitmap takes priority." 
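+// +// Editor's note (not upstream): e.g. if the struct itself is null at row 2, the returned child array reports row 2 as null even when the child's own bitmap marks it valid.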
+func (a *Struct) newStructFieldWithParentValidityMask(fieldIndex int) arrow.Array { + field := a.Field(fieldIndex) + nullBitmapBytes := field.NullBitmapBytes() + maskedNullBitmapBytes := make([]byte, len(nullBitmapBytes)) + copy(maskedNullBitmapBytes, nullBitmapBytes) + for i := 0; i < field.Len(); i++ { + if !a.IsValid(i) { + bitutil.ClearBit(maskedNullBitmapBytes, i) + } + } + data := NewSliceData(field.Data(), 0, int64(field.Len())).(*Data) + defer data.Release() + bufs := make([]*memory.Buffer, len(data.Buffers())) + copy(bufs, data.buffers) + bufs[0].Release() + bufs[0] = memory.NewBufferBytes(maskedNullBitmapBytes) + data.buffers = bufs + maskedField := MakeFromData(data) + return maskedField +} + +func (a *Struct) setData(data *Data) { + a.array.setData(data) + a.fields = make([]arrow.Array, len(data.childData)) + for i, child := range data.childData { + if data.offset != 0 || child.Len() != data.length { + sub := NewSliceData(child, int64(data.offset), int64(data.offset+data.length)) + a.fields[i] = MakeFromData(sub) + sub.Release() + } else { + a.fields[i] = MakeFromData(child) + } + } +} + +func (a *Struct) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + tmp := make(map[string]interface{}) + fieldList := a.data.dtype.(*arrow.StructType).Fields() + for j, d := range a.fields { + tmp[fieldList[j].Name] = d.GetOneForMarshal(i) + } + return tmp +} + +func (a *Struct) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualStruct(left, right *Struct) bool { + for i, lf := range left.fields { + rf := right.fields[i] + if !Equal(lf, rf) { + return false + } + } + return true +} + +func (a *Struct) Retain() { + a.array.Retain() + for _, f := range a.fields { + f.Retain() + } +} + +func (a *Struct) Release() { + a.array.Release() + for _, f := range a.fields { + f.Release() + } +} + +type StructBuilder struct { + builder + + dtype arrow.DataType + fields []Builder +} + +// NewStructBuilder returns a builder, using the provided memory allocator. +func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuilder { + b := &StructBuilder{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + fields: make([]Builder, len(dtype.Fields())), + } + for i, f := range dtype.Fields() { + b.fields[i] = NewBuilder(b.mem, f.Type) + } + return b +} + +func (b *StructBuilder) Type() arrow.DataType { + fields := make([]arrow.Field, len(b.fields)) + copy(fields, b.dtype.(*arrow.StructType).Fields()) + for i, b := range b.fields { + fields[i].Type = b.Type() + } + return arrow.StructOf(fields...) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *StructBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + + for _, f := range b.fields { + f.Release() + } + } +} + +func (b *StructBuilder) Append(v bool) { + b.Reserve(1) + b.unsafeAppendBoolToBitmap(v) + if !v { + for _, f := range b.fields { + f.AppendNull() + } + } +} + +func (b *StructBuilder) AppendValues(valids []bool) { + b.Reserve(len(valids)) + b.builder.unsafeAppendBoolsToBitmap(valids, len(valids)) +} + +func (b *StructBuilder) AppendNull() { b.Append(false) } + +func (b *StructBuilder) AppendEmptyValue() { + b.Append(true) + for _, f := range b.fields { + f.AppendEmptyValue() + } +} + +func (b *StructBuilder) unsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *StructBuilder) init(capacity int) { + b.builder.init(capacity) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *StructBuilder) Reserve(n int) { + b.builder.reserve(n, b.resizeHelper) + for _, f := range b.fields { + f.Reserve(n) + } +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may reduced. +func (b *StructBuilder) Resize(n int) { + b.resizeHelper(n) + for _, f := range b.fields { + f.Resize(n) + } +} + +func (b *StructBuilder) resizeHelper(n int) { + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(n, b.builder.init) + } +} + +func (b *StructBuilder) NumField() int { return len(b.fields) } +func (b *StructBuilder) FieldBuilder(i int) Builder { return b.fields[i] } + +// NewArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder +// so it can be used to build a new array. +func (b *StructBuilder) NewArray() arrow.Array { + return b.NewStructArray() +} + +// NewStructArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder +// so it can be used to build a new array. 
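+// +// Editor's sketch (not upstream), assuming dt is an *arrow.StructType whose first field is int64: +// sb := array.NewStructBuilder(mem, dt) +// sb.Append(true) // one valid struct slot +// sb.FieldBuilder(0).(*array.Int64Builder).Append(42) +// st := sb.NewStructArray() +// defer st.Release()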
+func (b *StructBuilder) NewStructArray() (a *Struct) { + data := b.newData() + a = NewStructData(data) + data.Release() + return +} + +func (b *StructBuilder) newData() (data *Data) { + fields := make([]arrow.ArrayData, len(b.fields)) + for i, f := range b.fields { + arr := f.NewArray() + defer arr.Release() + fields[i] = arr.Data() + } + + data = NewData( + b.Type(), b.length, + []*memory.Buffer{ + b.nullBitmap, + }, + fields, + b.nulls, + 0, + ) + b.reset() + + return +} + +func (b *StructBuilder) AppendValueFromString(s string) error { + if !strings.HasPrefix(s, "{") && !strings.HasSuffix(s, "}") { + return fmt.Errorf("%w: invalid string for struct should be be of form: {*}", arrow.ErrInvalid,) + } + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + +func (b *StructBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('{'): + b.Append(true) + keylist := make(map[string]bool) + for dec.More() { + keyTok, err := dec.Token() + if err != nil { + return err + } + + key, ok := keyTok.(string) + if !ok { + return errors.New("missing key") + } + + if keylist[key] { + return fmt.Errorf("key %s is specified twice", key) + } + + keylist[key] = true + + idx, ok := b.dtype.(*arrow.StructType).FieldIdx(key) + if !ok { + var extra interface{} + dec.Decode(&extra) + continue + } + + if err := b.fields[idx].UnmarshalOne(dec); err != nil { + return err + } + } + + // Append null values to all optional fields that were not presented in the json input + for _, field := range b.dtype.(*arrow.StructType).Fields() { + if !field.Nullable { + continue + } + idx, _ := b.dtype.(*arrow.StructType).FieldIdx(field.Name) + if _, hasKey := keylist[field.Name]; !hasKey { + b.fields[idx].AppendNull() + } + } + + // consume '}' + _, err := dec.Token() + return err + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Struct: fmt.Sprint(b.dtype), + } + } + return nil +} + +func (b *StructBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *StructBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("struct builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Struct)(nil) + _ Builder = (*StructBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/table.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/table.go new file mode 100644 index 00000000..88362c74 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/table.go @@ -0,0 +1,381 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "errors" + "fmt" + "math" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +// NewColumnSlice returns a new zero-copy slice of the column with the indicated +// indices i and j, corresponding to the column's array[i:j]. +// The returned column must be Release()'d after use. +// +// NewColSlice panics if the slice is outside the valid range of the column's array. +// NewColSlice panics if j < i. +func NewColumnSlice(col *arrow.Column, i, j int64) *arrow.Column { + slice := NewChunkedSlice(col.Data(), i, j) + defer slice.Release() + return arrow.NewColumn(col.Field(), slice) +} + +// NewChunkedSlice constructs a zero-copy slice of the chunked array with the indicated +// indices i and j, corresponding to array[i:j]. +// The returned chunked array must be Release()'d after use. +// +// NewSlice panics if the slice is outside the valid range of the input array. +// NewSlice panics if j < i. +func NewChunkedSlice(a *arrow.Chunked, i, j int64) *arrow.Chunked { + if j > int64(a.Len()) || i > j || i > int64(a.Len()) { + panic("arrow/array: index out of range") + } + + var ( + cur = 0 + beg = i + sz = j - i + chunks = make([]arrow.Array, 0, len(a.Chunks())) + ) + + for cur < len(a.Chunks()) && beg >= int64(a.Chunks()[cur].Len()) { + beg -= int64(a.Chunks()[cur].Len()) + cur++ + } + + for cur < len(a.Chunks()) && sz > 0 { + arr := a.Chunks()[cur] + end := beg + sz + if end > int64(arr.Len()) { + end = int64(arr.Len()) + } + chunks = append(chunks, NewSlice(arr, beg, end)) + sz -= int64(arr.Len()) - beg + beg = 0 + cur++ + } + chunks = chunks[:len(chunks):len(chunks)] + defer func() { + for _, chunk := range chunks { + chunk.Release() + } + }() + + return arrow.NewChunked(a.DataType(), chunks) +} + +// simpleTable is a basic, non-lazy in-memory table. +type simpleTable struct { + refCount int64 + + rows int64 + cols []arrow.Column + + schema *arrow.Schema +} + +// NewTable returns a new basic, non-lazy in-memory table. +// If rows is negative, the number of rows will be inferred from the height +// of the columns. +// +// NewTable panics if the columns and schema are inconsistent. +// NewTable panics if rows is larger than the height of the columns. +func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTable { + tbl := simpleTable{ + refCount: 1, + rows: rows, + cols: cols, + schema: schema, + } + + if tbl.rows < 0 { + switch len(tbl.cols) { + case 0: + tbl.rows = 0 + default: + tbl.rows = int64(tbl.cols[0].Len()) + } + } + + // validate the table and its constituents. + // note we retain the columns after having validated the table + // in case the validation fails and panics (and would otherwise leak + // a ref-count on the columns.) + tbl.validate() + + for i := range tbl.cols { + tbl.cols[i].Retain() + } + + return &tbl +} + +// NewTableFromSlice is a convenience function to create a table from a slice +// of slices of arrow.Array. 
+// +// Like other NewTable functions this can panic if: +// - len(schema.Fields) != len(data) +// - the total length of each column's array slice (ie: number of rows +// in the column) aren't the same for all columns. +func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable { + if len(data) != len(schema.Fields()) { + panic("array/table: mismatch in number of columns and data for creating a table") + } + + cols := make([]arrow.Column, len(schema.Fields())) + for i, arrs := range data { + field := schema.Field(i) + chunked := arrow.NewChunked(field.Type, arrs) + cols[i] = *arrow.NewColumn(field, chunked) + chunked.Release() + } + + tbl := simpleTable{ + refCount: 1, + schema: schema, + cols: cols, + rows: int64(cols[0].Len()), + } + + defer func() { + if r := recover(); r != nil { + // if validate panics, let's release the columns + // so that we don't leak them, then propagate the panic + for _, c := range cols { + c.Release() + } + panic(r) + } + }() + // validate the table and its constituents. + tbl.validate() + + return &tbl +} + +// NewTableFromRecords returns a new basic, non-lazy in-memory table. +// +// NewTableFromRecords panics if the records and schema are inconsistent. +func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) *simpleTable { + arrs := make([]arrow.Array, len(recs)) + cols := make([]arrow.Column, len(schema.Fields())) + + defer func(cols []arrow.Column) { + for i := range cols { + cols[i].Release() + } + }(cols) + + for i := range cols { + field := schema.Field(i) + for j, rec := range recs { + arrs[j] = rec.Column(i) + } + chunk := arrow.NewChunked(field.Type, arrs) + cols[i] = *arrow.NewColumn(field, chunk) + chunk.Release() + } + + return NewTable(schema, cols, -1) +} + +func (tbl *simpleTable) Schema() *arrow.Schema { return tbl.schema } +func (tbl *simpleTable) NumRows() int64 { return tbl.rows } +func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols)) } +func (tbl *simpleTable) Column(i int) *arrow.Column { return &tbl.cols[i] } + +func (tbl *simpleTable) validate() { + if len(tbl.cols) != len(tbl.schema.Fields()) { + panic(errors.New("arrow/array: table schema mismatch")) + } + for i, col := range tbl.cols { + if !col.Field().Equal(tbl.schema.Field(i)) { + panic(fmt.Errorf("arrow/array: column field %q is inconsistent with schema", col.Name())) + } + + if int64(col.Len()) < tbl.rows { + panic(fmt.Errorf("arrow/array: column %q expected length >= %d but got length %d", col.Name(), tbl.rows, col.Len())) + } + } +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (tbl *simpleTable) Retain() { + atomic.AddInt64(&tbl.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
+func (tbl *simpleTable) Release() { + debug.Assert(atomic.LoadInt64(&tbl.refCount) > 0, "too many releases") + + if atomic.AddInt64(&tbl.refCount, -1) == 0 { + for i := range tbl.cols { + tbl.cols[i].Release() + } + tbl.cols = nil + } +} + +// TableReader is a Record iterator over a (possibly chunked) Table +type TableReader struct { + refCount int64 + + tbl arrow.Table + cur int64 // current row + max int64 // total number of rows + rec arrow.Record // current Record + chksz int64 // chunk size + + chunks []*arrow.Chunked + slots []int // chunk indices + offsets []int64 // chunk offsets +} + +// NewTableReader returns a new TableReader to iterate over the (possibly chunked) Table. +// if chunkSize is <= 0, the biggest possible chunk will be selected. +func NewTableReader(tbl arrow.Table, chunkSize int64) *TableReader { + ncols := tbl.NumCols() + tr := &TableReader{ + refCount: 1, + tbl: tbl, + cur: 0, + max: int64(tbl.NumRows()), + chksz: chunkSize, + chunks: make([]*arrow.Chunked, ncols), + slots: make([]int, ncols), + offsets: make([]int64, ncols), + } + tr.tbl.Retain() + + if tr.chksz <= 0 { + tr.chksz = math.MaxInt64 + } + + for i := range tr.chunks { + col := tr.tbl.Column(i) + tr.chunks[i] = col.Data() + tr.chunks[i].Retain() + } + return tr +} + +func (tr *TableReader) Schema() *arrow.Schema { return tr.tbl.Schema() } +func (tr *TableReader) Record() arrow.Record { return tr.rec } + +func (tr *TableReader) Next() bool { + if tr.cur >= tr.max { + return false + } + + if tr.rec != nil { + tr.rec.Release() + } + + // determine the minimum contiguous slice across all columns + chunksz := imin64(tr.max, tr.chksz) + chunks := make([]arrow.Array, len(tr.chunks)) + for i := range chunks { + j := tr.slots[i] + chunk := tr.chunks[i].Chunk(j) + remain := int64(chunk.Len()) - tr.offsets[i] + if remain < chunksz { + chunksz = remain + } + + chunks[i] = chunk + } + + // slice the chunks, advance each chunk slot as appropriate. + batch := make([]arrow.Array, len(tr.chunks)) + for i, chunk := range chunks { + var slice arrow.Array + offset := tr.offsets[i] + switch int64(chunk.Len()) - offset { + case chunksz: + tr.slots[i]++ + tr.offsets[i] = 0 + if offset > 0 { + // need to slice + slice = NewSlice(chunk, offset, offset+chunksz) + } else { + // no need to slice + slice = chunk + slice.Retain() + } + default: + tr.offsets[i] += chunksz + slice = NewSlice(chunk, offset, offset+chunksz) + } + batch[i] = slice + } + + tr.cur += chunksz + tr.rec = NewRecord(tr.tbl.Schema(), batch, chunksz) + + for _, arr := range batch { + arr.Release() + } + + return true +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (tr *TableReader) Retain() { + atomic.AddInt64(&tr.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
+func (tr *TableReader) Release() { + debug.Assert(atomic.LoadInt64(&tr.refCount) > 0, "too many releases") + + if atomic.AddInt64(&tr.refCount, -1) == 0 { + tr.tbl.Release() + for _, chk := range tr.chunks { + chk.Release() + } + if tr.rec != nil { + tr.rec.Release() + } + tr.tbl = nil + tr.chunks = nil + tr.slots = nil + tr.offsets = nil + } +} +func (tr *TableReader) Err() error { return nil } + +func imin64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +var ( + _ arrow.Table = (*simpleTable)(nil) + _ RecordReader = (*TableReader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/union.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/union.go new file mode 100644 index 00000000..26352517 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/union.go @@ -0,0 +1,1342 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "errors" + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/goccy/go-json" +) + +// Union is a convenience interface to encompass both Sparse and Dense +// union array types. +type Union interface { + arrow.Array + // NumFields returns the number of child fields in this union. + // Equivalent to len(UnionType().Fields()) + NumFields() int + // Validate returns an error if there are any issues with the lengths + // or types of the children arrays mismatching with the Type of the + // Union Array. nil is returned if there are no problems. + Validate() error + // ValidateFull runs the same checks that Validate() does, but additionally + // checks that all childIDs are valid (>= 0 || ==InvalidID) and for + // dense unions validates that all offsets are within the bounds of their + // respective child. + ValidateFull() error + // TypeCodes returns the type id buffer for the union Array, equivalent to + // Data().Buffers()[1]. Note: This will not account for any slice offset. + TypeCodes() *memory.Buffer + // RawTypeCodes returns a slice of UnionTypeCodes properly accounting for + // any slice offset. + RawTypeCodes() []arrow.UnionTypeCode + // TypeCode returns the logical type code of the value at the requested index + TypeCode(i int) arrow.UnionTypeCode + // ChildID returns the index of the physical child containing the value + // at the requested index. 
Equivalent to: + // + // arr.UnionType().ChildIDs()[arr.RawTypeCodes()[i+arr.Data().Offset()]] + ChildID(i int) int + // UnionType is a convenience function to retrieve the properly typed UnionType + // instead of having to call DataType() and manually assert the type. + UnionType() arrow.UnionType + // Mode returns the union mode of the underlying Array, either arrow.SparseMode + // or arrow.DenseMode. + Mode() arrow.UnionMode + // Field returns the requested child array for this union. Returns nil if a + // non-existent position is passed in. + // + // The appropriate child for an index can be retrieved with Field(ChildID(index)) + Field(pos int) arrow.Array +} + +const kMaxElems = math.MaxInt32 + +type union struct { + array + + unionType arrow.UnionType + typecodes []arrow.UnionTypeCode + + children []arrow.Array +} + +func (a *union) Retain() { + a.array.Retain() + for _, c := range a.children { + c.Retain() + } +} + +func (a *union) Release() { + a.array.Release() + for _, c := range a.children { + c.Release() + } +} + +func (a *union) NumFields() int { return len(a.unionType.Fields()) } + +func (a *union) Mode() arrow.UnionMode { return a.unionType.Mode() } + +func (a *union) UnionType() arrow.UnionType { return a.unionType } + +func (a *union) TypeCodes() *memory.Buffer { + return a.data.buffers[1] +} + +func (a *union) RawTypeCodes() []arrow.UnionTypeCode { + if a.data.length > 0 { + return a.typecodes[a.data.offset:] + } + return []arrow.UnionTypeCode{} +} + +func (a *union) TypeCode(i int) arrow.UnionTypeCode { + return a.typecodes[i+a.data.offset] +} + +func (a *union) ChildID(i int) int { + return a.unionType.ChildIDs()[a.typecodes[i+a.data.offset]] +} + +func (a *union) setData(data *Data) { + a.unionType = data.dtype.(arrow.UnionType) + debug.Assert(len(data.buffers) >= 2, "arrow/array: invalid number of union array buffers") + + if data.length > 0 { + a.typecodes = arrow.Int8Traits.CastFromBytes(data.buffers[1].Bytes()) + } else { + a.typecodes = []int8{} + } + a.children = make([]arrow.Array, len(data.childData)) + for i, child := range data.childData { + if a.unionType.Mode() == arrow.SparseMode && (data.offset != 0 || child.Len() != data.length) { + child = NewSliceData(child, int64(data.offset), int64(data.offset+data.length)) + defer child.Release() + } + a.children[i] = MakeFromData(child) + } + a.array.setData(data) +} + +func (a *union) Field(pos int) (result arrow.Array) { + if pos < 0 || pos >= len(a.children) { + return nil + } + + return a.children[pos] +} + +func (a *union) Validate() error { + fields := a.unionType.Fields() + for i, f := range fields { + fieldData := a.data.childData[i] + if a.unionType.Mode() == arrow.SparseMode && fieldData.Len() < a.data.length+a.data.offset { + return fmt.Errorf("arrow/array: sparse union child array #%d has length smaller than expected for union array (%d < %d)", + i, fieldData.Len(), a.data.length+a.data.offset) + } + + if !arrow.TypeEqual(f.Type, fieldData.DataType()) { + return fmt.Errorf("arrow/array: union child array #%d does not match type field %s vs %s", + i, fieldData.DataType(), f.Type) + } + } + return nil +} + +func (a *union) ValidateFull() error { + if err := a.Validate(); err != nil { + return err + } + + childIDs := a.unionType.ChildIDs() + codesMap := a.unionType.TypeCodes() + codes := a.RawTypeCodes() + + for i := 0; i < a.data.length; i++ { + code := codes[i] + if code < 0 || childIDs[code] == arrow.InvalidUnionChildID { + return fmt.Errorf("arrow/array: union value at position %d has invalid type id 
%d", i, code) + } + } + + if a.unionType.Mode() == arrow.DenseMode { + // validate offsets + + // map logical typeid to child length + var childLengths [256]int64 + for i := range a.unionType.Fields() { + childLengths[codesMap[i]] = int64(a.data.childData[i].Len()) + } + + // check offsets are in bounds + var lastOffsets [256]int64 + offsets := arrow.Int32Traits.CastFromBytes(a.data.buffers[2].Bytes())[a.data.offset:] + for i := int64(0); i < int64(a.data.length); i++ { + code := codes[i] + offset := offsets[i] + switch { + case offset < 0: + return fmt.Errorf("arrow/array: union value at position %d has negative offset %d", i, offset) + case offset >= int32(childLengths[code]): + return fmt.Errorf("arrow/array: union value at position %d has offset larger than child length (%d >= %d)", + i, offset, childLengths[code]) + case offset < int32(lastOffsets[code]): + return fmt.Errorf("arrow/array: union value at position %d has non-monotonic offset %d", i, offset) + } + lastOffsets[code] = int64(offset) + } + } + + return nil +} + +// SparseUnion represents an array where each logical value is taken from +// a single child. A buffer of 8-bit type ids indicates which child a given +// logical value is to be taken from. This is represented as the ChildID, +// which is the index into the list of children. +// +// In a sparse union, each child array will have the same length as the +// union array itself, regardless of how many values in the union actually +// refer to it. +// +// Unlike most other arrays, unions do not have a top-level validity bitmap. +type SparseUnion struct { + union +} + +// NewSparseUnion constructs a union array using the given type, length, list of +// children and buffer of typeIDs with the given offset. +func NewSparseUnion(dt *arrow.SparseUnionType, length int, children []arrow.Array, typeIDs *memory.Buffer, offset int) *SparseUnion { + childData := make([]arrow.ArrayData, len(children)) + for i, c := range children { + childData[i] = c.Data() + } + data := NewData(dt, length, []*memory.Buffer{nil, typeIDs}, childData, 0, offset) + defer data.Release() + return NewSparseUnionData(data) +} + +// NewSparseUnionData constructs a SparseUnion array from the given ArrayData object. +func NewSparseUnionData(data arrow.ArrayData) *SparseUnion { + a := &SparseUnion{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// NewSparseUnionFromArrays constructs a new SparseUnion array with the provided +// values. +// +// typeIDs *must* be an INT8 array with no nulls +// len(codes) *must* be either 0 or equal to len(children). If len(codes) is 0, +// the type codes used will be sequentially numeric starting at 0. +func NewSparseUnionFromArrays(typeIDs arrow.Array, children []arrow.Array, codes ...arrow.UnionTypeCode) (*SparseUnion, error) { + return NewSparseUnionFromArraysWithFieldCodes(typeIDs, children, []string{}, codes) +} + +// NewSparseUnionFromArrayWithFields constructs a new SparseUnion array like +// NewSparseUnionFromArrays, but allows specifying the field names. Type codes +// will be auto-generated sequentially starting at 0. +// +// typeIDs *must* be an INT8 array with no nulls. +// len(fields) *must* either be 0 or equal to len(children). If len(fields) is 0, +// then the fields will be named sequentially starting at "0". 
+func NewSparseUnionFromArraysWithFields(typeIDs arrow.Array, children []arrow.Array, fields []string) (*SparseUnion, error) { + return NewSparseUnionFromArraysWithFieldCodes(typeIDs, children, fields, []arrow.UnionTypeCode{}) +} + +// NewSparseUnionFromArraysWithFieldCodes combines the other constructors +// for constructing a new SparseUnion array with the provided field names +// and type codes, along with children and type ids. +// +// All the requirements mentioned in NewSparseUnionFromArrays and +// NewSparseUnionFromArraysWithFields apply. +func NewSparseUnionFromArraysWithFieldCodes(typeIDs arrow.Array, children []arrow.Array, fields []string, codes []arrow.UnionTypeCode) (*SparseUnion, error) { + switch { + case typeIDs.DataType().ID() != arrow.INT8: + return nil, errors.New("arrow/array: union array type ids must be signed int8") + case typeIDs.NullN() != 0: + return nil, errors.New("arrow/array: union type ids may not have nulls") + case len(fields) > 0 && len(fields) != len(children): + return nil, errors.New("arrow/array: field names must have the same length as children") + case len(codes) > 0 && len(codes) != len(children): + return nil, errors.New("arrow/array: type codes must have same length as children") + } + + buffers := []*memory.Buffer{nil, typeIDs.Data().Buffers()[1]} + ty := arrow.SparseUnionFromArrays(children, fields, codes) + + childData := make([]arrow.ArrayData, len(children)) + for i, c := range children { + childData[i] = c.Data() + if c.Len() != typeIDs.Len() { + return nil, errors.New("arrow/array: sparse union array must have len(child) == len(typeids) for all children") + } + } + + data := NewData(ty, typeIDs.Len(), buffers, childData, 0, typeIDs.Data().Offset()) + defer data.Release() + return NewSparseUnionData(data), nil +} + +func (a *SparseUnion) setData(data *Data) { + a.union.setData(data) + debug.Assert(a.data.dtype.ID() == arrow.SPARSE_UNION, "arrow/array: invalid data type for SparseUnion") + debug.Assert(len(a.data.buffers) == 2, "arrow/array: sparse unions should have exactly 2 buffers") + debug.Assert(a.data.buffers[0] == nil, "arrow/array: validity bitmap for sparse unions should be nil") +} + +func (a *SparseUnion) GetOneForMarshal(i int) interface{} { + typeID := a.RawTypeCodes()[i] + + childID := a.ChildID(i) + data := a.Field(childID) + + if data.IsNull(i) { + return nil + } + + return []interface{}{typeID, data.GetOneForMarshal(i)} +} + +func (a *SparseUnion) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (a *SparseUnion) ValueStr(i int) string { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + panic(err) + } + return buf.String() +} +func (a *SparseUnion) String() string { + var b strings.Builder + b.WriteByte('[') + + fieldList := a.unionType.Fields() + for i := 0; i < a.Len(); i++ { + if i > 0 { + b.WriteString(" ") + } + + field := fieldList[a.ChildID(i)] + f := a.Field(a.ChildID(i)) + fmt.Fprintf(&b, "{%s=%v}", field.Name, f.GetOneForMarshal(i)) + } + b.WriteByte(']') + return b.String() +} + +// GetFlattenedField returns a child array, adjusting its validity bitmap +// where the union array type codes don't match. 
+// +// ie: the returned array will have a null in every index that it is +// not referenced by union. +func (a *SparseUnion) GetFlattenedField(mem memory.Allocator, index int) (arrow.Array, error) { + if index < 0 || index >= a.NumFields() { + return nil, fmt.Errorf("arrow/array: index out of range: %d", index) + } + + childData := a.data.childData[index] + if a.data.offset != 0 || a.data.length != childData.Len() { + childData = NewSliceData(childData, int64(a.data.offset), int64(a.data.offset+a.data.length)) + // NewSliceData doesn't break the slice reference for buffers + // since we're going to replace the null bitmap buffer we need to break the + // slice reference so that we don't affect a.children's references + newBufs := make([]*memory.Buffer, len(childData.Buffers())) + copy(newBufs, childData.(*Data).buffers) + childData.(*Data).buffers = newBufs + } else { + childData = childData.(*Data).Copy() + } + defer childData.Release() + + // synthesize a null bitmap based on the union discriminant + // make sure the bitmap has extra bits corresponding to the child's offset + flattenedNullBitmap := memory.NewResizableBuffer(mem) + flattenedNullBitmap.Resize(childData.Len() + childData.Offset()) + + var ( + childNullBitmap = childData.Buffers()[0] + childOffset = childData.Offset() + typeCode = a.unionType.TypeCodes()[index] + codes = a.RawTypeCodes() + offset int64 = 0 + ) + bitutils.GenerateBitsUnrolled(flattenedNullBitmap.Bytes(), int64(childOffset), int64(a.data.length), + func() bool { + b := codes[offset] == typeCode + offset++ + return b + }) + + if childNullBitmap != nil { + defer childNullBitmap.Release() + bitutil.BitmapAnd(flattenedNullBitmap.Bytes(), childNullBitmap.Bytes(), + int64(childOffset), int64(childOffset), flattenedNullBitmap.Bytes(), + int64(childOffset), int64(childData.Len())) + } + childData.(*Data).buffers[0] = flattenedNullBitmap + childData.(*Data).nulls = childData.Len() - bitutil.CountSetBits(flattenedNullBitmap.Bytes(), childOffset, childData.Len()) + return MakeFromData(childData), nil +} + +func arraySparseUnionEqual(l, r *SparseUnion) bool { + childIDs := l.unionType.ChildIDs() + leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes() + + for i := 0; i < l.data.length; i++ { + typeID := leftCodes[i] + if typeID != rightCodes[i] { + return false + } + + childNum := childIDs[typeID] + eq := SliceEqual(l.children[childNum], int64(i), int64(i+1), + r.children[childNum], int64(i), int64(i+1)) + if !eq { + return false + } + } + return true +} + +func arraySparseUnionApproxEqual(l, r *SparseUnion, opt equalOption) bool { + childIDs := l.unionType.ChildIDs() + leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes() + + for i := 0; i < l.data.length; i++ { + typeID := leftCodes[i] + if typeID != rightCodes[i] { + return false + } + + childNum := childIDs[typeID] + eq := sliceApproxEqual(l.children[childNum], int64(i+l.data.offset), int64(i+l.data.offset+1), + r.children[childNum], int64(i+r.data.offset), int64(i+r.data.offset+1), opt) + if !eq { + return false + } + } + return true +} + +// DenseUnion represents an array where each logical value is taken from +// a single child, at a specific offset. A buffer of 8-bit type ids +// indicates which child a given logical value is to be taken from and +// a buffer of 32-bit offsets indicating which physical position in the +// given child array has the logical value for that index. 
+// +// Unlike a sparse union, a dense union allows encoding only the child values +// which are actually referred to by the union array. This is counterbalanced +// by the additional footprint of the offsets buffer, and the additional +// indirection cost when looking up values. +// +// Unlike most other arrays, unions do not have a top-level validity bitmap. +type DenseUnion struct { + union + offsets []int32 +} + +// NewDenseUnion constructs a union array using the given type, length, list of +// children and buffers of typeIDs and offsets, with the given array offset. +func NewDenseUnion(dt *arrow.DenseUnionType, length int, children []arrow.Array, typeIDs, valueOffsets *memory.Buffer, offset int) *DenseUnion { + childData := make([]arrow.ArrayData, len(children)) + for i, c := range children { + childData[i] = c.Data() + } + + data := NewData(dt, length, []*memory.Buffer{nil, typeIDs, valueOffsets}, childData, 0, offset) + defer data.Release() + return NewDenseUnionData(data) +} + +// NewDenseUnionData constructs a DenseUnion array from the given ArrayData object. +func NewDenseUnionData(data arrow.ArrayData) *DenseUnion { + a := &DenseUnion{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// NewDenseUnionFromArrays constructs a new DenseUnion array with the provided +// values. +// +// typeIDs *must* be an INT8 array with no nulls +// offsets *must* be an INT32 array with no nulls +// len(codes) *must* be either 0 or equal to len(children). If len(codes) is 0, +// the type codes used will be sequentially numeric starting at 0. +func NewDenseUnionFromArrays(typeIDs, offsets arrow.Array, children []arrow.Array, codes ...arrow.UnionTypeCode) (*DenseUnion, error) { + return NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets, children, []string{}, codes) +} + +// NewDenseUnionFromArrayWithFields constructs a new DenseUnion array like +// NewDenseUnionFromArrays, but allows specifying the field names. Type codes +// will be auto-generated sequentially starting at 0. +// +// typeIDs *must* be an INT8 array with no nulls. +// offsets *must* be an INT32 array with no nulls. +// len(fields) *must* either be 0 or equal to len(children). If len(fields) is 0, +// then the fields will be named sequentially starting at "0". +func NewDenseUnionFromArraysWithFields(typeIDs, offsets arrow.Array, children []arrow.Array, fields []string) (*DenseUnion, error) { + return NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets, children, fields, []arrow.UnionTypeCode{}) +} + +// NewDenseUnionFromArraysWithFieldCodes combines the other constructors +// for constructing a new DenseUnion array with the provided field names +// and type codes, along with children and type ids. +// +// All the requirements mentioned in NewDenseUnionFromArrays and +// NewDenseUnionFromArraysWithFields apply. 
+func NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets arrow.Array, children []arrow.Array, fields []string, codes []arrow.UnionTypeCode) (*DenseUnion, error) { + switch { + case offsets.DataType().ID() != arrow.INT32: + return nil, errors.New("arrow/array: union offsets must be signed int32") + case typeIDs.DataType().ID() != arrow.INT8: + return nil, errors.New("arrow/array: union type_ids must be signed int8") + case typeIDs.NullN() != 0: + return nil, errors.New("arrow/array: union typeIDs may not have nulls") + case offsets.NullN() != 0: + return nil, errors.New("arrow/array: nulls are not allowed in offsets for NewDenseUnionFromArrays*") + case len(fields) > 0 && len(fields) != len(children): + return nil, errors.New("arrow/array: fields must be the same length as children") + case len(codes) > 0 && len(codes) != len(children): + return nil, errors.New("arrow/array: typecodes must have the same length as children") + } + + ty := arrow.DenseUnionFromArrays(children, fields, codes) + buffers := []*memory.Buffer{nil, typeIDs.Data().Buffers()[1], offsets.Data().Buffers()[1]} + + childData := make([]arrow.ArrayData, len(children)) + for i, c := range children { + childData[i] = c.Data() + } + + data := NewData(ty, typeIDs.Len(), buffers, childData, 0, typeIDs.Data().Offset()) + defer data.Release() + return NewDenseUnionData(data), nil +} + +func (a *DenseUnion) ValueOffsets() *memory.Buffer { return a.data.buffers[2] } + +func (a *DenseUnion) ValueOffset(i int) int32 { return a.offsets[i+a.data.offset] } + +func (a *DenseUnion) RawValueOffsets() []int32 { return a.offsets[a.data.offset:] } + +func (a *DenseUnion) setData(data *Data) { + a.union.setData(data) + debug.Assert(a.data.dtype.ID() == arrow.DENSE_UNION, "arrow/array: invalid data type for DenseUnion") + debug.Assert(len(a.data.buffers) == 3, "arrow/array: dense unions should have exactly 3 buffers") + debug.Assert(a.data.buffers[0] == nil, "arrow/array: validity bitmap for dense unions should be nil") + + if data.length > 0 { + a.offsets = arrow.Int32Traits.CastFromBytes(a.data.buffers[2].Bytes()) + } else { + a.offsets = []int32{} + } +} + +func (a *DenseUnion) GetOneForMarshal(i int) interface{} { + typeID := a.RawTypeCodes()[i] + + childID := a.ChildID(i) + data := a.Field(childID) + + offsets := a.RawValueOffsets() + if data.IsNull(int(offsets[i])) { + return nil + } + + return []interface{}{typeID, data.GetOneForMarshal(int(offsets[i]))} +} + +func (a *DenseUnion) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (a *DenseUnion) ValueStr(i int) string { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + panic(err) + } + return buf.String() +} + +func (a *DenseUnion) String() string { + var b strings.Builder + b.WriteByte('[') + + offsets := a.RawValueOffsets() + + fieldList := a.unionType.Fields() + for i := 0; i < a.Len(); i++ { + if i > 0 { + b.WriteString(" ") + } + + field := fieldList[a.ChildID(i)] + f := a.Field(a.ChildID(i)) + fmt.Fprintf(&b, "{%s=%v}", field.Name, f.GetOneForMarshal(int(offsets[i]))) + } + b.WriteByte(']') + return b.String() +} + +func arrayDenseUnionEqual(l, r *DenseUnion) bool { + childIDs := l.unionType.ChildIDs() + leftCodes, rightCodes := 
l.RawTypeCodes(), r.RawTypeCodes() + leftOffsets, rightOffsets := l.RawValueOffsets(), r.RawValueOffsets() + + for i := 0; i < l.data.length; i++ { + typeID := leftCodes[i] + if typeID != rightCodes[i] { + return false + } + + childNum := childIDs[typeID] + eq := SliceEqual(l.children[childNum], int64(leftOffsets[i]), int64(leftOffsets[i]+1), + r.children[childNum], int64(rightOffsets[i]), int64(rightOffsets[i]+1)) + if !eq { + return false + } + } + return true +} + +func arrayDenseUnionApproxEqual(l, r *DenseUnion, opt equalOption) bool { + childIDs := l.unionType.ChildIDs() + leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes() + leftOffsets, rightOffsets := l.RawValueOffsets(), r.RawValueOffsets() + + for i := 0; i < l.data.length; i++ { + typeID := leftCodes[i] + if typeID != rightCodes[i] { + return false + } + + childNum := childIDs[typeID] + eq := sliceApproxEqual(l.children[childNum], int64(leftOffsets[i]), int64(leftOffsets[i]+1), + r.children[childNum], int64(rightOffsets[i]), int64(rightOffsets[i]+1), opt) + if !eq { + return false + } + } + return true +} + +// UnionBuilder is a convenience interface for building Union arrays of +// either Dense or Sparse mode. +type UnionBuilder interface { + Builder + // AppendNulls appends n nulls to the array + AppendNulls(n int) + // AppendEmptyValues appends n empty zero values to the array + AppendEmptyValues(n int) + // AppendChild allows constructing the union type on the fly by making new + // new array builder available to the union builder. The type code (index) + // of the new child is returned, which should be passed to the Append method + // when adding a new element to the union array. + AppendChild(newChild Builder, fieldName string) (newCode arrow.UnionTypeCode) + // Append adds an element to the UnionArray indicating which typecode the + // new element should use. This *must* be followed up by an append to the + // appropriate child builder. + Append(arrow.UnionTypeCode) + // Mode returns what kind of Union is being built, either arrow.SparseMode + // or arrow.DenseMode + Mode() arrow.UnionMode + // Child returns the builder for the requested child index. + // If an invalid index is requested (e.g. <0 or >len(children)) + // then this will panic. 
+ Child(idx int) Builder +} + +type unionBuilder struct { + builder + + childFields []arrow.Field + codes []arrow.UnionTypeCode + mode arrow.UnionMode + + children []Builder + typeIDtoBuilder []Builder + typeIDtoChildID []int + // for all typeID < denseTypeID, typeIDtoBuilder[typeID] != nil + denseTypeID arrow.UnionTypeCode + typesBuilder *int8BufferBuilder +} + +func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionType) unionBuilder { + if children == nil { + children = make([]Builder, 0) + } + b := unionBuilder{ + builder: builder{refCount: 1, mem: mem}, + mode: typ.Mode(), + codes: typ.TypeCodes(), + children: children, + typeIDtoChildID: make([]int, typ.MaxTypeCode()+1), + typeIDtoBuilder: make([]Builder, typ.MaxTypeCode()+1), + childFields: make([]arrow.Field, len(children)), + typesBuilder: newInt8BufferBuilder(mem), + } + + b.typeIDtoChildID[0] = arrow.InvalidUnionChildID + for i := 1; i < len(b.typeIDtoChildID); i *= 2 { + copy(b.typeIDtoChildID[i:], b.typeIDtoChildID[:i]) + } + + debug.Assert(len(children) == len(typ.TypeCodes()), "mismatched typecodes and children") + debug.Assert(len(b.typeIDtoBuilder)-1 <= int(arrow.MaxUnionTypeCode), "too many typeids") + + copy(b.childFields, typ.Fields()) + for i, c := range children { + c.Retain() + typeID := typ.TypeCodes()[i] + b.typeIDtoChildID[typeID] = i + b.typeIDtoBuilder[typeID] = c + } + + return b +} + +func (b *unionBuilder) NumChildren() int { + return len(b.children) +} + +func (b *unionBuilder) Child(idx int) Builder { + if idx < 0 || idx > len(b.children) { + panic("arrow/array: invalid child index for union builder") + } + return b.children[idx] +} + +// Len returns the current number of elements in the builder. +func (b *unionBuilder) Len() int { return b.typesBuilder.Len() } + +func (b *unionBuilder) Mode() arrow.UnionMode { return b.mode } + +func (b *unionBuilder) reserve(elements int, resize func(int)) { + // union has no null bitmap, ever so we can skip that handling + if b.length+elements > b.capacity { + b.capacity = bitutil.NextPowerOf2(b.length + elements) + resize(b.capacity) + } +} + +func (b *unionBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + for _, c := range b.children { + c.Release() + } + b.typesBuilder.Release() + } +} + +func (b *unionBuilder) Type() arrow.DataType { + fields := make([]arrow.Field, len(b.childFields)) + for i, f := range b.childFields { + fields[i] = f + fields[i].Type = b.children[i].Type() + } + + switch b.mode { + case arrow.SparseMode: + return arrow.SparseUnionOf(fields, b.codes) + case arrow.DenseMode: + return arrow.DenseUnionOf(fields, b.codes) + default: + panic("invalid union builder mode") + } +} + +func (b *unionBuilder) AppendChild(newChild Builder, fieldName string) arrow.UnionTypeCode { + newChild.Retain() + b.children = append(b.children, newChild) + newType := b.nextTypeID() + + b.typeIDtoChildID[newType] = len(b.children) - 1 + b.typeIDtoBuilder[newType] = newChild + b.childFields = append(b.childFields, arrow.Field{Name: fieldName, Nullable: true}) + b.codes = append(b.codes, newType) + + return newType +} + +func (b *unionBuilder) nextTypeID() arrow.UnionTypeCode { + // find typeID such that typeIDtoBuilder[typeID] == nil + // use that for the new child. 
Start searching at denseTypeID + // since typeIDtoBuilder is densely packed up at least to denseTypeID + for ; int(b.denseTypeID) < len(b.typeIDtoBuilder); b.denseTypeID++ { + if b.typeIDtoBuilder[b.denseTypeID] == nil { + id := b.denseTypeID + b.denseTypeID++ + return id + } + } + + debug.Assert(len(b.typeIDtoBuilder) < int(arrow.MaxUnionTypeCode), "too many children typeids") + // typeIDtoBuilder is already densely packed, so just append the new child + b.typeIDtoBuilder = append(b.typeIDtoBuilder, nil) + b.typeIDtoChildID = append(b.typeIDtoChildID, arrow.InvalidUnionChildID) + id := b.denseTypeID + b.denseTypeID++ + return id + +} + +func (b *unionBuilder) newData() *Data { + length := b.typesBuilder.Len() + typesBuffer := b.typesBuilder.Finish() + defer typesBuffer.Release() + childData := make([]arrow.ArrayData, len(b.children)) + for i, b := range b.children { + childData[i] = b.newData() + defer childData[i].Release() + } + + return NewData(b.Type(), length, []*memory.Buffer{nil, typesBuffer}, childData, 0, 0) +} + +// SparseUnionBuilder is used to build a Sparse Union array using the Append +// methods. You can also add new types to the union on the fly by using +// AppendChild. +// +// Keep in mind: All children of a SparseUnion should be the same length +// as the union itself. If you add new children with AppendChild, ensure +// that they have the correct number of preceding elements that have been +// added to the builder beforehand. +type SparseUnionBuilder struct { + unionBuilder +} + +// NewEmptySparseUnionBuilder is a helper to construct a SparseUnionBuilder +// without having to predefine the union types. It creates a builder with no +// children and AppendChild will have to be called before appending any +// elements to this builder. +func NewEmptySparseUnionBuilder(mem memory.Allocator) *SparseUnionBuilder { + return &SparseUnionBuilder{ + unionBuilder: newUnionBuilder(mem, nil, arrow.SparseUnionOf([]arrow.Field{}, []arrow.UnionTypeCode{})), + } +} + +// NewSparseUnionBuilder constructs a new SparseUnionBuilder with the provided +// children and type codes. Builders will be constructed for each child +// using the fields in typ +func NewSparseUnionBuilder(mem memory.Allocator, typ *arrow.SparseUnionType) *SparseUnionBuilder { + children := make([]Builder, len(typ.Fields())) + for i, f := range typ.Fields() { + children[i] = NewBuilder(mem, f.Type) + defer children[i].Release() + } + return NewSparseUnionBuilderWithBuilders(mem, typ, children) +} + +// NewSparseUnionWithBuilders returns a new SparseUnionBuilder using the +// provided type and builders. +func NewSparseUnionBuilderWithBuilders(mem memory.Allocator, typ *arrow.SparseUnionType, children []Builder) *SparseUnionBuilder { + return &SparseUnionBuilder{ + unionBuilder: newUnionBuilder(mem, children, typ), + } +} + +func (b *SparseUnionBuilder) Reserve(n int) { + b.reserve(n, b.Resize) +} + +func (b *SparseUnionBuilder) Resize(n int) { + b.typesBuilder.resize(n) +} + +// AppendNull will append a null to the first child and an empty value +// (implementation-defined) to the rest of the children. +func (b *SparseUnionBuilder) AppendNull() { + firstChildCode := b.codes[0] + b.typesBuilder.AppendValue(firstChildCode) + b.typeIDtoBuilder[firstChildCode].AppendNull() + for _, c := range b.codes[1:] { + b.typeIDtoBuilder[c].AppendEmptyValue() + } +} + +// AppendNulls is identical to calling AppendNull() n times, except +// it will pre-allocate with reserve for all the nulls beforehand. 
+func (b *SparseUnionBuilder) AppendNulls(n int) { + firstChildCode := b.codes[0] + b.Reserve(n) + for _, c := range b.codes { + b.typeIDtoBuilder[c].Reserve(n) + } + for i := 0; i < n; i++ { + b.typesBuilder.AppendValue(firstChildCode) + b.typeIDtoBuilder[firstChildCode].AppendNull() + for _, c := range b.codes[1:] { + b.typeIDtoBuilder[c].AppendEmptyValue() + } + } +} + +// AppendEmptyValue appends an empty value (implementation defined) +// to each child, and appends the type of the first typecode to the typeid +// buffer. +func (b *SparseUnionBuilder) AppendEmptyValue() { + b.typesBuilder.AppendValue(b.codes[0]) + for _, c := range b.codes { + b.typeIDtoBuilder[c].AppendEmptyValue() + } +} + +// AppendEmptyValues is identical to calling AppendEmptyValue() n times, +// except it pre-allocates first so it is more efficient. +func (b *SparseUnionBuilder) AppendEmptyValues(n int) { + b.Reserve(n) + firstChildCode := b.codes[0] + for _, c := range b.codes { + b.typeIDtoBuilder[c].Reserve(n) + } + for i := 0; i < n; i++ { + b.typesBuilder.AppendValue(firstChildCode) + for _, c := range b.codes { + b.typeIDtoBuilder[c].AppendEmptyValue() + } + } +} + +// Append appends an element to the UnionArray and must be followed up +// by an append to the appropriate child builder. The parameter should +// be the type id of the child to which the next value will be appended. +// +// After appending to the corresponding child builder, all other child +// builders should have a null or empty value appended to them (although +// this is not enfoced and any value is theoretically allowed and will be +// ignored). +func (b *SparseUnionBuilder) Append(nextType arrow.UnionTypeCode) { + b.typesBuilder.AppendValue(nextType) +} + +func (b *SparseUnionBuilder) NewArray() arrow.Array { + return b.NewSparseUnionArray() +} + +func (b *SparseUnionBuilder) NewSparseUnionArray() (a *SparseUnion) { + data := b.newData() + a = NewSparseUnionData(data) + data.Release() + return +} + +func (b *SparseUnionBuilder) UnmarshalJSON(data []byte) (err error) { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("sparse union builder must unpack from json array, found %s", t) + } + return b.Unmarshal(dec) +} + +func (b *SparseUnionBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *SparseUnionBuilder) AppendValueFromString(s string) error { + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + +func (b *SparseUnionBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + // should be [type_id, Value] + typeID, err := dec.Token() + if err != nil { + return err + } + + var typeCode int8 + + switch tid := typeID.(type) { + case json.Number: + id, err := tid.Int64() + if err != nil { + return err + } + typeCode = int8(id) + case float64: + if tid != float64(int64(tid)) { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Type: reflect.TypeOf(int8(0)), + Struct: fmt.Sprint(b.Type()), + Value: "float", + } + } + typeCode = int8(tid) + } + + childNum := b.typeIDtoChildID[typeCode] + if childNum == arrow.InvalidUnionChildID { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: "invalid type code", + } + } + + for i, c := range b.children { + if i != 
childNum { + c.AppendNull() + } + } + + b.Append(typeCode) + if err := b.children[childNum].UnmarshalOne(dec); err != nil { + return err + } + + endArr, err := dec.Token() + if err != nil { + return err + } + + if endArr != json.Delim(']') { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: "union value array should have exactly 2 elements", + } + } + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: fmt.Sprint(t), + Struct: fmt.Sprint(b.Type()), + } + } + return nil +} + +// DenseUnionBuilder is used to build a Dense Union array using the Append +// methods. You can also add new types to the union on the fly by using +// AppendChild. +type DenseUnionBuilder struct { + unionBuilder + + offsetsBuilder *int32BufferBuilder +} + +// NewEmptyDenseUnionBuilder is a helper to construct a DenseUnionBuilder +// without having to predefine the union types. It creates a builder with no +// children and AppendChild will have to be called before appending any +// elements to this builder. +func NewEmptyDenseUnionBuilder(mem memory.Allocator) *DenseUnionBuilder { + return &DenseUnionBuilder{ + unionBuilder: newUnionBuilder(mem, nil, arrow.DenseUnionOf([]arrow.Field{}, []arrow.UnionTypeCode{})), + offsetsBuilder: newInt32BufferBuilder(mem), + } +} + +// NewDenseUnionBuilder constructs a new DenseUnionBuilder with the provided +// children and type codes. Builders will be constructed for each child +// using the fields in typ +func NewDenseUnionBuilder(mem memory.Allocator, typ *arrow.DenseUnionType) *DenseUnionBuilder { + children := make([]Builder, len(typ.Fields())) + for i, f := range typ.Fields() { + children[i] = NewBuilder(mem, f.Type) + defer children[i].Release() + } + return NewDenseUnionBuilderWithBuilders(mem, typ, children) +} + +// NewDenseUnionWithBuilders returns a new DenseUnionBuilder using the +// provided type and builders. +func NewDenseUnionBuilderWithBuilders(mem memory.Allocator, typ *arrow.DenseUnionType, children []Builder) *DenseUnionBuilder { + return &DenseUnionBuilder{ + unionBuilder: newUnionBuilder(mem, children, typ), + offsetsBuilder: newInt32BufferBuilder(mem), + } +} + +func (b *DenseUnionBuilder) Reserve(n int) { + b.reserve(n, b.Resize) +} + +func (b *DenseUnionBuilder) Resize(n int) { + b.typesBuilder.resize(n) + b.offsetsBuilder.resize(n * arrow.Int32SizeBytes) +} + +// AppendNull will only append a null value arbitrarily to the first child +// and use that offset for this element of the array. +func (b *DenseUnionBuilder) AppendNull() { + firstChildCode := b.codes[0] + childBuilder := b.typeIDtoBuilder[firstChildCode] + b.typesBuilder.AppendValue(firstChildCode) + b.offsetsBuilder.AppendValue(int32(childBuilder.Len())) + childBuilder.AppendNull() +} + +// AppendNulls will only append a single null arbitrarily to the first child +// and use the same offset multiple times to point to it. 
The result is that +// for a DenseUnion this is more efficient than calling AppendNull multiple +// times in a loop +func (b *DenseUnionBuilder) AppendNulls(n int) { + // only append 1 null to the child builder, use the same offset twice + firstChildCode := b.codes[0] + childBuilder := b.typeIDtoBuilder[firstChildCode] + b.Reserve(n) + for i := 0; i < n; i++ { + b.typesBuilder.AppendValue(firstChildCode) + b.offsetsBuilder.AppendValue(int32(childBuilder.Len())) + } + // only append a single null to the child builder, the offsets all refer to the same value + childBuilder.AppendNull() +} + +// AppendEmptyValue only appends an empty value arbitrarily to the first child, +// and then uses that offset to identify the value. +func (b *DenseUnionBuilder) AppendEmptyValue() { + firstChildCode := b.codes[0] + childBuilder := b.typeIDtoBuilder[firstChildCode] + b.typesBuilder.AppendValue(firstChildCode) + b.offsetsBuilder.AppendValue(int32(childBuilder.Len())) + childBuilder.AppendEmptyValue() +} + +// AppendEmptyValues, like AppendNulls, will only append a single empty value +// (implementation defined) to the first child arbitrarily, and then point +// at that value using the offsets n times. That makes this more efficient +// than calling AppendEmptyValue multiple times. +func (b *DenseUnionBuilder) AppendEmptyValues(n int) { + // only append 1 null to the child builder, use the same offset twice + firstChildCode := b.codes[0] + childBuilder := b.typeIDtoBuilder[firstChildCode] + b.Reserve(n) + for i := 0; i < n; i++ { + b.typesBuilder.AppendValue(firstChildCode) + b.offsetsBuilder.AppendValue(int32(childBuilder.Len())) + } + // only append a single empty value to the child builder, the offsets all + // refer to the same value + childBuilder.AppendEmptyValue() +} + +// Append appends the necessary offset and type code to the builder +// and must be followed up with an append to the appropriate child builder +func (b *DenseUnionBuilder) Append(nextType arrow.UnionTypeCode) { + b.typesBuilder.AppendValue(nextType) + bldr := b.typeIDtoBuilder[nextType] + if bldr.Len() == kMaxElems { + panic("a dense UnionArray cannot contain more than 2^31 - 1 elements from a single child") + } + + b.offsetsBuilder.AppendValue(int32(bldr.Len())) +} + +func (b *DenseUnionBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + for _, c := range b.children { + c.Release() + } + b.typesBuilder.Release() + b.offsetsBuilder.Release() + } +} + +func (b *DenseUnionBuilder) newData() *Data { + data := b.unionBuilder.newData() + data.buffers = append(data.buffers, b.offsetsBuilder.Finish()) + return data +} + +func (b *DenseUnionBuilder) NewArray() arrow.Array { + return b.NewDenseUnionArray() +} + +func (b *DenseUnionBuilder) NewDenseUnionArray() (a *DenseUnion) { + data := b.newData() + a = NewDenseUnionData(data) + data.Release() + return +} + +func (b *DenseUnionBuilder) UnmarshalJSON(data []byte) (err error) { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("dense union builder must unpack from json array, found %s", t) + } + return b.Unmarshal(dec) +} + +func (b *DenseUnionBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (d *DenseUnionBuilder) AppendValueFromString(s string) error { + dec := 
json.NewDecoder(strings.NewReader(s)) + return d.UnmarshalOne(dec) +} + +func (b *DenseUnionBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + // should be [type_id, Value] + typeID, err := dec.Token() + if err != nil { + return err + } + + var typeCode int8 + + switch tid := typeID.(type) { + case json.Number: + id, err := tid.Int64() + if err != nil { + return err + } + typeCode = int8(id) + case float64: + if tid != float64(int64(tid)) { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Type: reflect.TypeOf(int8(0)), + Struct: fmt.Sprint(b.Type()), + Value: "float", + } + } + typeCode = int8(tid) + } + + childNum := b.typeIDtoChildID[typeCode] + if childNum == arrow.InvalidUnionChildID { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: "invalid type code", + } + } + + b.Append(typeCode) + if err := b.children[childNum].UnmarshalOne(dec); err != nil { + return err + } + + endArr, err := dec.Token() + if err != nil { + return err + } + + if endArr != json.Delim(']') { + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: "union value array should have exactly 2 elements", + } + } + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Offset: dec.InputOffset(), + Value: fmt.Sprint(t), + Struct: fmt.Sprint(b.Type()), + } + } + return nil +} + +var ( + _ arrow.Array = (*SparseUnion)(nil) + _ arrow.Array = (*DenseUnion)(nil) + _ Union = (*SparseUnion)(nil) + _ Union = (*DenseUnion)(nil) + _ Builder = (*SparseUnionBuilder)(nil) + _ Builder = (*DenseUnionBuilder)(nil) + _ UnionBuilder = (*SparseUnionBuilder)(nil) + _ UnionBuilder = (*DenseUnionBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/util.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/util.go new file mode 100644 index 00000000..897bae7a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/array/util.go @@ -0,0 +1,520 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "errors" + "fmt" + "io" + "strings" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v12/internal/hashing" + "github.com/goccy/go-json" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +type fromJSONCfg struct { + multiDocument bool + startOffset int64 + useNumber bool +} + +type FromJSONOption func(*fromJSONCfg) + +func WithMultipleDocs() FromJSONOption { + return func(c *fromJSONCfg) { + c.multiDocument = true + } +} + +// WithStartOffset attempts to start decoding from the reader at the offset +// passed in. 
If using this option the reader must fulfill the io.ReadSeeker +// interface, or else an error will be returned. +// +// It will call Seek(off, io.SeekStart) on the reader +func WithStartOffset(off int64) FromJSONOption { + return func(c *fromJSONCfg) { + c.startOffset = off + } +} + +// WithUseNumber enables the 'UseNumber' option on the json decoder, using +// the json.Number type instead of assuming float64 for numbers. This is critical +// if you have numbers that are larger than what can fit into the 53 bits of +// an IEEE float64 mantissa and want to preserve its value. +func WithUseNumber() FromJSONOption { + return func(c *fromJSONCfg) { + c.useNumber = true + } +} + +// FromJSON creates an arrow.Array from a corresponding JSON stream and defined data type. If the types in the +// json do not match the type provided, it will return errors. This is *not* the integration test format +// and should not be used as such. This intended to be used by consumers more similarly to the current exposing of +// the csv reader/writer. It also returns the input offset in the reader where it finished decoding since buffering +// by the decoder could leave the reader's cursor past where the parsing finished if attempting to parse multiple json +// arrays from one stream. +// +// All the Array types implement json.Marshaller and thus can be written to json +// using the json.Marshal function +// +// The JSON provided must be formatted in one of two ways: +// Default: the top level of the json must be a list which matches the type specified exactly +// Example: `[1, 2, 3, 4, 5]` for any integer type or `[[...], null, [], .....]` for a List type +// Struct arrays are represented a list of objects: `[{"foo": 1, "bar": "moo"}, {"foo": 5, "bar": "baz"}]` +// +// Using WithMultipleDocs: +// If the JSON provided is multiple newline separated json documents, then use this option +// and each json document will be treated as a single row of the array. This is most useful for record batches +// and interacting with other processes that use json. For example: +// `{"col1": 1, "col2": "row1", "col3": ...}\n{"col1": 2, "col2": "row2", "col3": ...}\n.....` +// +// Duration values get formated upon marshalling as a string consisting of their numeric +// value followed by the unit suffix such as "10s" for a value of 10 and unit of Seconds. +// with "ms" for millisecond, "us" for microsecond, and "ns" for nanosecond as the suffixes. +// Unmarshalling duration values is more permissive since it first tries to use Go's +// time.ParseDuration function which means it allows values in the form 3h25m0.3s in addition +// to the same values which are output. +// +// Interval types are marshalled / unmarshalled as follows: +// MonthInterval is marshalled as an object with the format: +// { "months": #} +// DayTimeInterval is marshalled using Go's regular marshalling of structs: +// { "days": #, "milliseconds": # } +// MonthDayNanoInterval values are marshalled the same as DayTime using Go's struct marshalling: +// { "months": #, "days": #, "nanoseconds": # } +// +// Times use a format of HH:MM or HH:MM:SS[.zzz] where the fractions of a second cannot +// exceed the precision allowed by the time unit, otherwise unmarshalling will error. 
+// +// Dates use YYYY-MM-DD format +// +// Timestamps use RFC3339Nano format except without a timezone, all of the following are valid: +// YYYY-MM-DD +// YYYY-MM-DD[T]HH +// YYYY-MM-DD[T]HH:MM +// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzzz] +// +// The fractions of a second cannot exceed the precision allowed by the timeunit of the datatype. +// +// When processing structs as objects order of keys does not matter, but keys cannot be repeated. +func FromJSON(mem memory.Allocator, dt arrow.DataType, r io.Reader, opts ...FromJSONOption) (arr arrow.Array, offset int64, err error) { + var cfg fromJSONCfg + for _, o := range opts { + o(&cfg) + } + + if cfg.startOffset != 0 { + seeker, ok := r.(io.ReadSeeker) + if !ok { + return nil, 0, errors.New("using StartOffset option requires reader to be a ReadSeeker, cannot seek") + } + + seeker.Seek(cfg.startOffset, io.SeekStart) + } + + bldr := NewBuilder(mem, dt) + defer bldr.Release() + + dec := json.NewDecoder(r) + defer func() { + if errors.Is(err, io.EOF) { + err = fmt.Errorf("failed parsing json: %w", io.ErrUnexpectedEOF) + } + }() + + if cfg.useNumber { + dec.UseNumber() + } + + if !cfg.multiDocument { + t, err := dec.Token() + if err != nil { + return nil, dec.InputOffset(), err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return nil, dec.InputOffset(), fmt.Errorf("json doc must be an array, found %s", delim) + } + } + + if err = bldr.Unmarshal(dec); err != nil { + return nil, dec.InputOffset(), err + } + + if !cfg.multiDocument { + // consume the last ']' + if _, err = dec.Token(); err != nil { + return nil, dec.InputOffset(), err + } + } + + return bldr.NewArray(), dec.InputOffset(), nil +} + +// RecordToStructArray constructs a struct array from the columns of the record batch +// by referencing them, zero-copy. +func RecordToStructArray(rec arrow.Record) *Struct { + cols := make([]arrow.ArrayData, rec.NumCols()) + for i, c := range rec.Columns() { + cols[i] = c.Data() + } + + data := NewData(arrow.StructOf(rec.Schema().Fields()...), int(rec.NumRows()), []*memory.Buffer{nil}, cols, 0, 0) + defer data.Release() + + return NewStructData(data) +} + +// RecordFromStructArray is a convenience function for converting a struct array into +// a record batch without copying the data. If the passed in schema is nil, the fields +// of the struct will be used to define the record batch. Otherwise the passed in +// schema will be used to create the record batch. If passed in, the schema must match +// the fields of the struct column. +func RecordFromStructArray(in *Struct, schema *arrow.Schema) arrow.Record { + if schema == nil { + schema = arrow.NewSchema(in.DataType().(*arrow.StructType).Fields(), nil) + } + + return NewRecord(schema, in.fields, int64(in.Len())) +} + +// RecordFromJSON creates a record batch from JSON data. See array.FromJSON for the details +// of formatting and logic. +// +// A record batch from JSON is equivalent to reading a struct array in from json and then +// converting it to a record batch. +func RecordFromJSON(mem memory.Allocator, schema *arrow.Schema, r io.Reader, opts ...FromJSONOption) (arrow.Record, int64, error) { + st := arrow.StructOf(schema.Fields()...) + arr, off, err := FromJSON(mem, st, r, opts...) + if err != nil { + return nil, off, err + } + defer arr.Release() + + return RecordFromStructArray(arr.(*Struct), schema), off, nil +} + +// RecordToJSON writes out the given record following the format of each row is a single object +// on a single line of the output. 
+// RecordToJSON writes out the given record with each row encoded as a single
+// JSON object on its own line of the output.
+func RecordToJSON(rec arrow.Record, w io.Writer) error {
+	enc := json.NewEncoder(w)
+
+	fields := rec.Schema().Fields()
+
+	cols := make(map[string]interface{})
+	for i := 0; int64(i) < rec.NumRows(); i++ {
+		for j, c := range rec.Columns() {
+			cols[fields[j].Name] = c.GetOneForMarshal(i)
+		}
+		if err := enc.Encode(cols); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// TableFromJSON creates an arrow.Table from a slice of JSON strings, one per
+// record batch, all conforming to the provided schema.
+func TableFromJSON(mem memory.Allocator, sc *arrow.Schema, recJSON []string, opt ...FromJSONOption) (arrow.Table, error) {
+	batches := make([]arrow.Record, len(recJSON))
+	for i, batchJSON := range recJSON {
+		batch, _, err := RecordFromJSON(mem, sc, strings.NewReader(batchJSON), opt...)
+		if err != nil {
+			return nil, err
+		}
+		defer batch.Release()
+		batches[i] = batch
+	}
+	return NewTableFromRecords(sc, batches), nil
+}
+
+// GetDictArrayData constructs the ArrayData for the dictionary values
+// accumulated in memoTable, starting at startOffset.
+func GetDictArrayData(mem memory.Allocator, valueType arrow.DataType, memoTable hashing.MemoTable, startOffset int) (*Data, error) {
+	dictLen := memoTable.Size() - startOffset
+	buffers := []*memory.Buffer{nil, nil}
+
+	buffers[1] = memory.NewResizableBuffer(mem)
+	defer buffers[1].Release()
+
+	switch tbl := memoTable.(type) {
+	case hashing.NumericMemoTable:
+		nbytes := tbl.TypeTraits().BytesRequired(dictLen)
+		buffers[1].Resize(nbytes)
+		tbl.WriteOutSubset(startOffset, buffers[1].Bytes())
+	case *hashing.BinaryMemoTable:
+		switch valueType.ID() {
+		case arrow.BINARY, arrow.STRING:
+			buffers = append(buffers, memory.NewResizableBuffer(mem))
+			defer buffers[2].Release()
+
+			buffers[1].Resize(arrow.Int32Traits.BytesRequired(dictLen + 1))
+			offsets := arrow.Int32Traits.CastFromBytes(buffers[1].Bytes())
+			tbl.CopyOffsetsSubset(startOffset, offsets)
+
+			valuesz := offsets[len(offsets)-1] - offsets[0]
+			buffers[2].Resize(int(valuesz))
+			tbl.CopyValuesSubset(startOffset, buffers[2].Bytes())
+		case arrow.LARGE_BINARY, arrow.LARGE_STRING:
+			buffers = append(buffers, memory.NewResizableBuffer(mem))
+			defer buffers[2].Release()
+
+			buffers[1].Resize(arrow.Int64Traits.BytesRequired(dictLen + 1))
+			offsets := arrow.Int64Traits.CastFromBytes(buffers[1].Bytes())
+			tbl.CopyLargeOffsetsSubset(startOffset, offsets)
+
+			valuesz := offsets[len(offsets)-1] - offsets[0]
+			buffers[2].Resize(int(valuesz))
+			tbl.CopyValuesSubset(startOffset, buffers[2].Bytes())
+		default: // fixed size
+			bw := int(bitutil.BytesForBits(int64(valueType.(arrow.FixedWidthDataType).BitWidth())))
+			buffers[1].Resize(dictLen * bw)
+			tbl.CopyFixedWidthValues(startOffset, bw, buffers[1].Bytes())
+		}
+	default:
+		return nil, fmt.Errorf("arrow/array: dictionary unifier unimplemented type: %s", valueType)
+	}
+
+	var nullcount int
+	if idx, ok := memoTable.GetNull(); ok && idx >= startOffset {
+		buffers[0] = memory.NewResizableBuffer(mem)
+		defer buffers[0].Release()
+		nullcount = 1
+		buffers[0].Resize(int(bitutil.BytesForBits(int64(dictLen))))
+		memory.Set(buffers[0].Bytes(), 0xFF)
+		bitutil.ClearBit(buffers[0].Bytes(), idx)
+	}
+
+	return NewData(valueType, dictLen, buffers, nil, nullcount, 0), nil
+}
+
+// DictArrayFromJSON creates a dictionary array from separate JSON strings for
+// the indices and the dictionary values.
+func DictArrayFromJSON(mem memory.Allocator, dt *arrow.DictionaryType, indicesJSON, dictJSON string) (arrow.Array, error) {
+	indices, _, err := FromJSON(mem, dt.IndexType, strings.NewReader(indicesJSON))
+	if err != nil {
+		return nil, err
+	}
+	defer indices.Release()
+
+	dict, _, err := FromJSON(mem, dt.ValueType, strings.NewReader(dictJSON))
+	if err != nil {
+		return nil, err
+	}
+	defer dict.Release()
+
+	return NewDictionaryArray(dt, indices, dict), nil
+}
+
+// ChunkedFromJSON creates an arrow.Chunked array from a slice of JSON strings, one per chunk.
+func ChunkedFromJSON(mem memory.Allocator, dt arrow.DataType, chunkStrs
[]string, opts ...FromJSONOption) (*arrow.Chunked, error) { + chunks := make([]arrow.Array, len(chunkStrs)) + defer func() { + for _, c := range chunks { + if c != nil { + c.Release() + } + } + }() + + var err error + for i, c := range chunkStrs { + chunks[i], _, err = FromJSON(mem, dt, strings.NewReader(c), opts...) + if err != nil { + return nil, err + } + } + + return arrow.NewChunked(dt, chunks), nil +} + +func getMaxBufferLen(dt arrow.DataType, length int) int { + bufferLen := int(bitutil.BytesForBits(int64(length))) + + maxOf := func(bl int) int { + if bl > bufferLen { + return bl + } + return bufferLen + } + + switch dt := dt.(type) { + case *arrow.DictionaryType: + bufferLen = maxOf(getMaxBufferLen(dt.ValueType, length)) + return maxOf(getMaxBufferLen(dt.IndexType, length)) + case *arrow.FixedSizeBinaryType: + return maxOf(dt.ByteWidth * length) + case arrow.FixedWidthDataType: + return maxOf(int(bitutil.BytesForBits(int64(dt.BitWidth()))) * length) + case *arrow.StructType: + for _, f := range dt.Fields() { + bufferLen = maxOf(getMaxBufferLen(f.Type, length)) + } + return bufferLen + case *arrow.SparseUnionType: + // type codes + bufferLen = maxOf(length) + // creates children of the same length of the union + for _, f := range dt.Fields() { + bufferLen = maxOf(getMaxBufferLen(f.Type, length)) + } + return bufferLen + case *arrow.DenseUnionType: + // type codes + bufferLen = maxOf(length) + // offsets + bufferLen = maxOf(arrow.Int32SizeBytes * length) + // create children of length 1 + for _, f := range dt.Fields() { + bufferLen = maxOf(getMaxBufferLen(f.Type, 1)) + } + return bufferLen + case arrow.OffsetsDataType: + return maxOf(dt.OffsetTypeTraits().BytesRequired(length + 1)) + case *arrow.FixedSizeListType: + return maxOf(getMaxBufferLen(dt.Elem(), int(dt.Len())*length)) + case arrow.ExtensionType: + return maxOf(getMaxBufferLen(dt.StorageType(), length)) + default: + panic(fmt.Errorf("arrow/array: arrayofnull not implemented for type %s", dt)) + } +} + +type nullArrayFactory struct { + mem memory.Allocator + dt arrow.DataType + len int + buf *memory.Buffer +} + +func (n *nullArrayFactory) create() *Data { + if n.buf == nil { + bufLen := getMaxBufferLen(n.dt, n.len) + n.buf = memory.NewResizableBuffer(n.mem) + n.buf.Resize(bufLen) + defer n.buf.Release() + } + + var ( + dt = n.dt + bufs = []*memory.Buffer{memory.SliceBuffer(n.buf, 0, int(bitutil.BytesForBits(int64(n.len))))} + childData []arrow.ArrayData + dictData arrow.ArrayData + ) + defer bufs[0].Release() + + if ex, ok := dt.(arrow.ExtensionType); ok { + dt = ex.StorageType() + } + + if nf, ok := dt.(arrow.NestedType); ok { + childData = make([]arrow.ArrayData, len(nf.Fields())) + } + + switch dt := dt.(type) { + case *arrow.NullType: + case *arrow.DictionaryType: + bufs = append(bufs, n.buf) + arr := MakeArrayOfNull(n.mem, dt.ValueType, 0) + defer arr.Release() + dictData = arr.Data() + case arrow.FixedWidthDataType: + bufs = append(bufs, n.buf) + case arrow.BinaryDataType: + bufs = append(bufs, n.buf, n.buf) + case arrow.OffsetsDataType: + bufs = append(bufs, n.buf) + childData[0] = n.createChild(dt, 0, 0) + defer childData[0].Release() + case *arrow.FixedSizeListType: + childData[0] = n.createChild(dt, 0, n.len*int(dt.Len())) + defer childData[0].Release() + case *arrow.StructType: + for i := range dt.Fields() { + childData[i] = n.createChild(dt, i, n.len) + defer childData[i].Release() + } + case *arrow.RunEndEncodedType: + bldr := NewBuilder(n.mem, dt.RunEnds()) + defer bldr.Release() + + switch b := bldr.(type) { + 
case *Int16Builder:
+			b.Append(int16(n.len))
+		case *Int32Builder:
+			b.Append(int32(n.len))
+		case *Int64Builder:
+			b.Append(int64(n.len))
+		}
+
+		childData[0] = bldr.newData()
+		defer childData[0].Release()
+		childData[1] = n.createChild(dt.Encoded(), 1, 1)
+		defer childData[1].Release()
+	case arrow.UnionType:
+		bufs[0].Release()
+		bufs[0] = nil
+		bufs = append(bufs, n.buf)
+		// buffer is zeroed, but 0 may not be a valid type code
+		if dt.TypeCodes()[0] != 0 {
+			bufs[1] = memory.NewResizableBuffer(n.mem)
+			bufs[1].Resize(n.len)
+			defer bufs[1].Release()
+			memory.Set(bufs[1].Bytes(), byte(dt.TypeCodes()[0]))
+		}
+
+		// for sparse unions we create children with the same length
+		childLen := n.len
+		if dt.Mode() == arrow.DenseMode {
+			// for dense unions, the offsets are all 0 and the children
+			// are created with length 1
+			bufs = append(bufs, n.buf)
+			childLen = 1
+		}
+		for i := range dt.Fields() {
+			childData[i] = n.createChild(dt, i, childLen)
+			defer childData[i].Release()
+		}
+	}
+
+	out := NewData(n.dt, n.len, bufs, childData, n.len, 0)
+	if dictData != nil {
+		out.SetDictionary(dictData)
+	}
+	return out
+}
+
+func (n *nullArrayFactory) createChild(dt arrow.DataType, i, length int) *Data {
+	childFactory := &nullArrayFactory{
+		mem: n.mem, dt: n.dt.(arrow.NestedType).Fields()[i].Type,
+		len: length, buf: n.buf}
+	return childFactory.create()
+}
+
+// MakeArrayOfNull creates an all-null array of the given data type with the requested length.
+func MakeArrayOfNull(mem memory.Allocator, dt arrow.DataType, length int) arrow.Array {
+	if dt.ID() == arrow.NULL {
+		return NewNull(length)
+	}
+
+	data := (&nullArrayFactory{mem: mem, dt: dt, len: length}).create()
+	defer data.Release()
+	return MakeFromData(data)
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go
new file mode 100644
index 00000000..45d11546
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package arrio exposes functions to manipulate records, exposing and using
+// interfaces not unlike the ones defined in the stdlib io package.
+package arrio
+
+import (
+	"errors"
+	"io"
+
+	"github.com/apache/arrow/go/v12/arrow"
+)
+
+// Reader is the interface that wraps the Read method.
+type Reader interface {
+	// Read reads the current record from the underlying stream, returning it
+	// and an error, if any. When the Reader reaches the end of the underlying
+	// stream, it returns (nil, io.EOF).
+	Read() (arrow.Record, error)
+}
+
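`Copy` and `CopyN` below tie these interfaces together. As a hedged usage sketch (not part of this diff): the upstream project's `ipc` package provides concrete stream implementations, and assuming its `Reader`/`Writer` satisfy `arrio.Reader`/`arrio.Writer` — which is how upstream's own tools use them — piping an Arrow IPC stream from stdin to stdout looks like this:

```go
package main

import (
	"os"

	"github.com/apache/arrow/go/v12/arrow/arrio"
	"github.com/apache/arrow/go/v12/arrow/ipc"
)

func main() {
	rdr, err := ipc.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	defer rdr.Release()

	wtr := ipc.NewWriter(os.Stdout, ipc.WithSchema(rdr.Schema()))
	defer wtr.Close()

	// Copy drains the reader until io.EOF, which it treats as success.
	if _, err := arrio.Copy(wtr, rdr); err != nil {
		panic(err)
	}
}
```

Because the interfaces mirror the stdlib io package, any record source and sink can be composed this way without either side knowing about the other.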
+// ReaderAt is the interface that wraps the ReadAt method.
+type ReaderAt interface {
+	// ReadAt reads the i-th record from the underlying stream, returning it
+	// and an error, if any.
+	ReadAt(i int64) (arrow.Record, error)
+}
+
+// Writer is the interface that wraps the Write method.
+type Writer interface {
+	Write(rec arrow.Record) error
+}
+
+// Copy copies all the records available from src to dst.
+// Copy returns the number of records copied and the first error
+// encountered while copying, if any.
+//
+// A successful Copy returns err == nil, not err == EOF. Because Copy is
+// defined to read from src until EOF, it does not treat an EOF from Read as an
+// error to be reported.
+func Copy(dst Writer, src Reader) (n int64, err error) {
+	for {
+		rec, err := src.Read()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return n, nil
+			}
+			return n, err
+		}
+		err = dst.Write(rec)
+		if err != nil {
+			return n, err
+		}
+		n++
+	}
+}
+
+// CopyN copies n records (or until an error) from src to dst. It returns the
+// number of records copied and the earliest error encountered while copying. On
+// return, written == n if and only if err == nil.
+func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
+	for ; written < n; written++ {
+		rec, err := src.Read()
+		if err != nil {
+			// an EOF before n records were read is propagated to the caller,
+			// preserving the invariant that written == n iff err == nil
+			return written, err
+		}
+		if err = dst.Write(rec); err != nil {
+			return written, err
+		}
+	}
+	return written, nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/Makefile b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/Makefile
new file mode 100644
index 00000000..12dd1d34
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/Makefile
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility
+PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/'
+
+C2GOASM=c2goasm
+CC=clang-11
+C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 \
+	-fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib
+ASM_FLAGS_AVX2=-mavx2 -mfma
+ASM_FLAGS_SSE4=-msse4
+ASM_FLAGS_BMI2=-mbmi2
+ASM_FLAGS_POPCNT=-mpopcnt
+
+C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \
+	-fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+
+.PHONY: assembly
+
+INTEL_SOURCES := \
+	bitmap_ops_avx2_amd64.s bitmap_ops_sse4_amd64.s
+
+#
+# ARROW-15336: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm adds Arm64 support.
+# min_max_neon_arm64.s was generated by asm2plan9s
+# and manually formatted as Arm64 Plan9 assembly.
+#
+
+assembly: $(INTEL_SOURCES)
+
+_lib/bitmap_ops_avx2_amd64.s: _lib/bitmap_ops.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/bitmap_ops_sse4_amd64.s: _lib/bitmap_ops.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+bitmap_ops_avx2_amd64.s: _lib/bitmap_ops_avx2_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+bitmap_ops_sse4_amd64.s: _lib/bitmap_ops_sse4_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+clean:
+	rm -f $(INTEL_SOURCES)
+	rm -f $(addprefix _lib/,$(INTEL_SOURCES))
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops.go
new file mode 100644
index 00000000..7db750a6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops.go
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bitutil + +func alignedBitAndGo(left, right, out []byte) { + var ( + nbytes = len(out) + i = 0 + ) + if nbytes > uint64SizeBytes { + // case where we have enough bytes to operate on words + leftWords := bytesToUint64(left[i:]) + rightWords := bytesToUint64(right[i:]) + outWords := bytesToUint64(out[i:]) + + for w := range outWords { + outWords[w] = leftWords[w] & rightWords[w] + } + + i += len(outWords) * uint64SizeBytes + } + // grab any remaining bytes that were fewer than a word + for ; i < nbytes; i++ { + out[i] = left[i] & right[i] + } +} + +func alignedBitAndNotGo(left, right, out []byte) { + var ( + nbytes = len(out) + i = 0 + ) + if nbytes > uint64SizeBytes { + // case where we have enough bytes to operate on words + leftWords := bytesToUint64(left[i:]) + rightWords := bytesToUint64(right[i:]) + outWords := bytesToUint64(out[i:]) + + for w := range outWords { + outWords[w] = leftWords[w] &^ rightWords[w] + } + + i += len(outWords) * uint64SizeBytes + } + // grab any remaining bytes that were fewer than a word + for ; i < nbytes; i++ { + out[i] = left[i] &^ right[i] + } +} + +func alignedBitOrGo(left, right, out []byte) { + var ( + nbytes = len(out) + i = 0 + ) + if nbytes > uint64SizeBytes { + // case where we have enough bytes to operate on words + leftWords := bytesToUint64(left[i:]) + rightWords := bytesToUint64(right[i:]) + outWords := bytesToUint64(out[i:]) + + for w := range outWords { + outWords[w] = leftWords[w] | rightWords[w] + } + + i += len(outWords) * uint64SizeBytes + } + // grab any remaining bytes that were fewer than a word + for ; i < nbytes; i++ { + out[i] = left[i] | right[i] + } +} + +func alignedBitXorGo(left, right, out []byte) { + var ( + nbytes = len(out) + i = 0 + ) + if nbytes > uint64SizeBytes { + // case where we have enough bytes to operate on words + leftWords := bytesToUint64(left[i:]) + rightWords := bytesToUint64(right[i:]) + outWords := bytesToUint64(out[i:]) + + for w := range outWords { + outWords[w] = leftWords[w] ^ rightWords[w] + } + + i += len(outWords) * uint64SizeBytes + } + // grab any remaining bytes that were fewer than a word + for ; i < nbytes; i++ { + out[i] = left[i] ^ right[i] + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_amd64.go new file mode 100644 index 00000000..ad0fd674 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_amd64.go @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm +// +build !noasm + +package bitutil + +import "golang.org/x/sys/cpu" + +func init() { + if cpu.X86.HasAVX2 { + bitAndOp.opAligned = bitmapAlignedAndAVX2 + bitOrOp.opAligned = bitmapAlignedOrAVX2 + bitAndNotOp.opAligned = bitmapAlignedAndNotAVX2 + bitXorOp.opAligned = bitmapAlignedXorAVX2 + } else if cpu.X86.HasSSE42 { + bitAndOp.opAligned = bitmapAlignedAndSSE4 + bitOrOp.opAligned = bitmapAlignedOrSSE4 + bitAndNotOp.opAligned = bitmapAlignedAndNotSSE4 + bitXorOp.opAligned = bitmapAlignedXorSSE4 + } else { + bitAndOp.opAligned = alignedBitAndGo + bitOrOp.opAligned = alignedBitOrGo + bitAndNotOp.opAligned = alignedBitAndNotGo + bitXorOp.opAligned = alignedBitXorGo + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_arm64.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_arm64.go new file mode 100644 index 00000000..28d95d84 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_arm64.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm +// +build !noasm + +package bitutil + +func init() { + bitAndOp.opAligned = alignedBitAndGo + bitOrOp.opAligned = alignedBitOrGo + bitAndNotOp.opAligned = alignedBitAndNotGo + bitXorOp.opAligned = alignedBitXorGo +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.go new file mode 100644 index 00000000..1c01bd0f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm +// +build !noasm + +package bitutil + +import ( + "unsafe" +) + +//go:noescape +func _bitmap_aligned_and_avx2(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedAndAVX2(left, right, out []byte) { + _bitmap_aligned_and_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_or_avx2(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedOrAVX2(left, right, out []byte) { + _bitmap_aligned_or_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_and_not_avx2(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedAndNotAVX2(left, right, out []byte) { + _bitmap_aligned_and_not_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_xor_avx2(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedXorAVX2(left, right, out []byte) { + _bitmap_aligned_xor_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.s new file mode 100644 index 00000000..00172e86 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.s @@ -0,0 +1,373 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ยท_bitmap_aligned_and_avx2(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB0_12 + LONG $0x7ff98348 // cmp rcx, 127 + JA LBB0_7 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB0_3 + +LBB0_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd3970f41 // seta r11b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0x8441; BYTE $0xdb // test r11b, bl + JNE LBB0_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB0_3 + WORD $0x8949; BYTE $0xca // mov r10, rcx + LONG $0x80e28349 // and r10, -128 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB0_10: + LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8] + LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32] + LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64] + LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96] + LONG $0x547ca1c4; WORD $0x0704 // vandps ymm0, ymm0, yword [rdi + r8] + LONG $0x5474a1c4; WORD $0x074c; BYTE $0x20 // vandps ymm1, ymm1, yword [rdi + r8 + 32] + LONG $0x546ca1c4; WORD $0x0754; BYTE $0x40 // vandps ymm2, ymm2, yword [rdi + r8 + 64] + LONG $0x5464a1c4; WORD $0x075c; BYTE $0x60 // vandps ymm3, ymm3, yword [rdi + r8 + 96] + LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0 + LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1 + LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2 + LONG $0x117ca1c4; WORD 
$0x025c; BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3 + LONG $0x80e88349 // sub r8, -128 + WORD $0x394d; BYTE $0xc2 // cmp r10, r8 + JNE LBB0_10 + WORD $0x3949; BYTE $0xca // cmp r10, rcx + JE LBB0_12 + +LBB0_3: + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB0_5 + +LBB0_4: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17042242 // and al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + LONG $0x01c28349 // add r10, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB0_4 + +LBB0_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB0_12 + +LBB0_6: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17042242 // and al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1] + LONG $0x17442242; BYTE $0x01 // and al, byte [rdi + r10 + 1] + LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al + LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2] + LONG $0x17442242; BYTE $0x02 // and al, byte [rdi + r10 + 2] + LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al + LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3] + LONG $0x17442242; BYTE $0x03 // and al, byte [rdi + r10 + 3] + LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al + LONG $0x04c28349 // add r10, 4 + WORD $0x394c; BYTE $0xd1 // cmp rcx, r10 + JNE LBB0_6 + +LBB0_12: + VZEROUPPER + RET + +TEXT ยท_bitmap_aligned_or_avx2(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB1_12 + LONG $0x7ff98348 // cmp rcx, 127 + JA LBB1_7 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB1_3 + +LBB1_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd3970f41 // seta r11b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0x8441; BYTE $0xdb // test r11b, bl + JNE LBB1_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB1_3 + WORD $0x8949; BYTE $0xca // mov r10, rcx + LONG $0x80e28349 // and r10, -128 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB1_10: + LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8] + LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32] + LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64] + LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96] + LONG $0x567ca1c4; WORD $0x0704 // vorps ymm0, ymm0, yword [rdi + r8] + LONG $0x5674a1c4; WORD $0x074c; BYTE $0x20 // vorps ymm1, ymm1, yword [rdi + r8 + 32] + LONG $0x566ca1c4; WORD $0x0754; BYTE $0x40 // vorps ymm2, ymm2, yword [rdi + r8 + 64] + LONG $0x5664a1c4; WORD $0x075c; BYTE $0x60 // vorps ymm3, ymm3, yword [rdi + r8 + 96] + LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0 + LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1 + LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2 + LONG $0x117ca1c4; WORD $0x025c; 
BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3 + LONG $0x80e88349 // sub r8, -128 + WORD $0x394d; BYTE $0xc2 // cmp r10, r8 + JNE LBB1_10 + WORD $0x3949; BYTE $0xca // cmp r10, rcx + JE LBB1_12 + +LBB1_3: + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB1_5 + +LBB1_4: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17040a42 // or al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + LONG $0x01c28349 // add r10, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB1_4 + +LBB1_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB1_12 + +LBB1_6: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17040a42 // or al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1] + LONG $0x17440a42; BYTE $0x01 // or al, byte [rdi + r10 + 1] + LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al + LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2] + LONG $0x17440a42; BYTE $0x02 // or al, byte [rdi + r10 + 2] + LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al + LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3] + LONG $0x17440a42; BYTE $0x03 // or al, byte [rdi + r10 + 3] + LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al + LONG $0x04c28349 // add r10, 4 + WORD $0x394c; BYTE $0xd1 // cmp rcx, r10 + JNE LBB1_6 + +LBB1_12: + VZEROUPPER + RET + +TEXT ยท_bitmap_aligned_and_not_avx2(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB2_12 + LONG $0x7ff98348 // cmp rcx, 127 + JA LBB2_7 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + JMP LBB2_3 + +LBB2_7: + LONG $0x0a048d4c // lea r8, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd3970f41 // seta r11b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf8 // cmp r8, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd2970f41 // seta r10b + WORD $0x3949; BYTE $0xf0 // cmp r8, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + WORD $0x8441; BYTE $0xdb // test r11b, bl + JNE LBB2_3 + WORD $0x2045; BYTE $0xca // and r10b, r9b + JNE LBB2_3 + WORD $0x8949; BYTE $0xc8 // mov r8, rcx + LONG $0x80e08349 // and r8, -128 + WORD $0xc031 // xor eax, eax + +LBB2_10: + LONG $0x0410fcc5; BYTE $0x06 // vmovups ymm0, yword [rsi + rax] + LONG $0x4c10fcc5; WORD $0x2006 // vmovups ymm1, yword [rsi + rax + 32] + LONG $0x5410fcc5; WORD $0x4006 // vmovups ymm2, yword [rsi + rax + 64] + LONG $0x5c10fcc5; WORD $0x6006 // vmovups ymm3, yword [rsi + rax + 96] + LONG $0x0455fcc5; BYTE $0x07 // vandnps ymm0, ymm0, yword [rdi + rax] + LONG $0x4c55f4c5; WORD $0x2007 // vandnps ymm1, ymm1, yword [rdi + rax + 32] + LONG $0x5455ecc5; WORD $0x4007 // vandnps ymm2, ymm2, yword [rdi + rax + 64] + LONG $0x5c55e4c5; WORD $0x6007 // vandnps ymm3, ymm3, yword [rdi + rax + 96] + LONG $0x0411fcc5; BYTE $0x02 // vmovups yword [rdx + rax], ymm0 + LONG $0x4c11fcc5; WORD $0x2002 // vmovups yword [rdx + rax + 32], ymm1 + LONG $0x5411fcc5; WORD $0x4002 // vmovups yword [rdx + rax + 64], ymm2 + LONG $0x5c11fcc5; WORD $0x6002 // vmovups yword [rdx + rax + 96], ymm3 + LONG $0x80e88348 // sub rax, -128 + WORD $0x3949; BYTE $0xc0 // 
cmp r8, rax + JNE LBB2_10 + WORD $0x3949; BYTE $0xc8 // cmp r8, rcx + JE LBB2_12 + +LBB2_3: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + WORD $0xf749; BYTE $0xd1 // not r9 + WORD $0xc1f6; BYTE $0x01 // test cl, 1 + JE LBB2_5 + LONG $0x06048a42 // mov al, byte [rsi + r8] + WORD $0xd0f6 // not al + LONG $0x07042242 // and al, byte [rdi + r8] + LONG $0x02048842 // mov byte [rdx + r8], al + LONG $0x01c88349 // or r8, 1 + +LBB2_5: + WORD $0x0149; BYTE $0xc9 // add r9, rcx + JE LBB2_12 + +LBB2_6: + LONG $0x04b60f42; BYTE $0x06 // movzx eax, byte [rsi + r8] + WORD $0xd0f6 // not al + LONG $0x07042242 // and al, byte [rdi + r8] + LONG $0x02048842 // mov byte [rdx + r8], al + LONG $0x44b60f42; WORD $0x0106 // movzx eax, byte [rsi + r8 + 1] + WORD $0xd0f6 // not al + LONG $0x07442242; BYTE $0x01 // and al, byte [rdi + r8 + 1] + LONG $0x02448842; BYTE $0x01 // mov byte [rdx + r8 + 1], al + LONG $0x02c08349 // add r8, 2 + WORD $0x394c; BYTE $0xc1 // cmp rcx, r8 + JNE LBB2_6 + +LBB2_12: + VZEROUPPER + RET + +TEXT ยท_bitmap_aligned_xor_avx2(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB3_12 + LONG $0x7ff98348 // cmp rcx, 127 + JA LBB3_7 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB3_3 + +LBB3_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd3970f41 // seta r11b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0x8441; BYTE $0xdb // test r11b, bl + JNE LBB3_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB3_3 + WORD $0x8949; BYTE $0xca // mov r10, rcx + LONG $0x80e28349 // and r10, -128 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB3_10: + LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8] + LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32] + LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64] + LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96] + LONG $0x577ca1c4; WORD $0x0704 // vxorps ymm0, ymm0, yword [rdi + r8] + LONG $0x5774a1c4; WORD $0x074c; BYTE $0x20 // vxorps ymm1, ymm1, yword [rdi + r8 + 32] + LONG $0x576ca1c4; WORD $0x0754; BYTE $0x40 // vxorps ymm2, ymm2, yword [rdi + r8 + 64] + LONG $0x5764a1c4; WORD $0x075c; BYTE $0x60 // vxorps ymm3, ymm3, yword [rdi + r8 + 96] + LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0 + LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1 + LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2 + LONG $0x117ca1c4; WORD $0x025c; BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3 + LONG $0x80e88349 // sub r8, -128 + WORD $0x394d; BYTE $0xc2 // cmp r10, r8 + JNE LBB3_10 + WORD $0x3949; BYTE $0xca // cmp r10, rcx + JE LBB3_12 + +LBB3_3: + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB3_5 + +LBB3_4: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17043242 // xor al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + 
LONG $0x01c28349 // add r10, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB3_4 + +LBB3_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB3_12 + +LBB3_6: + LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10] + LONG $0x17043242 // xor al, byte [rdi + r10] + LONG $0x12048842 // mov byte [rdx + r10], al + LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1] + LONG $0x17443242; BYTE $0x01 // xor al, byte [rdi + r10 + 1] + LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al + LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2] + LONG $0x17443242; BYTE $0x02 // xor al, byte [rdi + r10 + 2] + LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al + LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3] + LONG $0x17443242; BYTE $0x03 // xor al, byte [rdi + r10 + 3] + LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al + LONG $0x04c28349 // add r10, 4 + WORD $0x394c; BYTE $0xd1 // cmp rcx, r10 + JNE LBB3_6 + +LBB3_12: + VZEROUPPER + RET diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_noasm.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_noasm.go new file mode 100644 index 00000000..e2534779 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_noasm.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build noasm +// +build noasm + +package bitutil + +func init() { + bitAndOp.opAligned = alignedBitAndGo + bitOrOp.opAligned = alignedBitOrGo + bitAndNotOp.opAligned = alignedBitAndNotGo + bitXorOp.opAligned = alignedBitXorGo +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_ppc64le.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_ppc64le.go new file mode 100644 index 00000000..28d95d84 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_ppc64le.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm +// +build !noasm + +package bitutil + +func init() { + bitAndOp.opAligned = alignedBitAndGo + bitOrOp.opAligned = alignedBitOrGo + bitAndNotOp.opAligned = alignedBitAndNotGo + bitXorOp.opAligned = alignedBitXorGo +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_s390x.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_s390x.go new file mode 100644 index 00000000..28d95d84 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_s390x.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm +// +build !noasm + +package bitutil + +func init() { + bitAndOp.opAligned = alignedBitAndGo + bitOrOp.opAligned = alignedBitOrGo + bitAndNotOp.opAligned = alignedBitAndNotGo + bitXorOp.opAligned = alignedBitXorGo +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.go new file mode 100644 index 00000000..f16bce12 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm +// +build !noasm + +package bitutil + +import ( + "unsafe" +) + +//go:noescape +func _bitmap_aligned_and_sse4(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedAndSSE4(left, right, out []byte) { + _bitmap_aligned_and_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_or_sse4(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedOrSSE4(left, right, out []byte) { + _bitmap_aligned_or_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_and_not_sse4(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedAndNotSSE4(left, right, out []byte) { + _bitmap_aligned_and_not_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} + +//go:noescape +func _bitmap_aligned_xor_sse4(left, right, out unsafe.Pointer, length int64) + +func bitmapAlignedXorSSE4(left, right, out []byte) { + _bitmap_aligned_xor_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out))) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.s new file mode 100644 index 00000000..c15e1862 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.s @@ -0,0 +1,501 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ยท_bitmap_aligned_and_sse4(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB0_16 + LONG $0x1ff98348 // cmp rcx, 31 + JA LBB0_7 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB0_3: + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB0_5 + +LBB0_4: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f042242 // and al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x01c38349 // add r11, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB0_4 + +LBB0_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB0_16 + +LBB0_6: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f042242 // and al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1] + LONG $0x1f442242; BYTE $0x01 // and al, byte [rdi + r11 + 1] + LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al + LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2] + LONG $0x1f442242; BYTE $0x02 // and al, byte [rdi + r11 + 2] + LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al + LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3] + LONG $0x1f442242; BYTE $0x03 // and al, byte [rdi + r11 + 3] + LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al + LONG $0x04c38349 // add r11, 4 + WORD $0x394c; BYTE $0xd9 // cmp rcx, r11 + JNE LBB0_6 + JMP LBB0_16 + +LBB0_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd2970f41 // seta r10b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + 
WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + WORD $0x8441; BYTE $0xda // test r10b, bl + JNE LBB0_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB0_3 + WORD $0x8949; BYTE $0xcb // mov r11, rcx + LONG $0xe0e38349 // and r11, -32 + LONG $0xe0438d49 // lea rax, [r11 - 32] + WORD $0x8949; BYTE $0xc1 // mov r9, rax + LONG $0x05e9c149 // shr r9, 5 + LONG $0x01c18349 // add r9, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB0_10 + WORD $0x894d; BYTE $0xca // mov r10, r9 + LONG $0xfee28349 // and r10, -2 + WORD $0xf749; BYTE $0xda // neg r10 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB0_12: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32] + LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48] + LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32] + WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0 + LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48] + WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1 + LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2 + LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0 + LONG $0x40c08349 // add r8, 64 + LONG $0x02c28349 // add r10, 2 + JNE LBB0_12 + LONG $0x01c1f641 // test r9b, 1 + JE LBB0_15 + +LBB0_14: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + +LBB0_15: + WORD $0x3949; BYTE $0xcb // cmp r11, rcx + JNE LBB0_3 + +LBB0_16: + RET + +LBB0_10: + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + LONG $0x01c1f641 // test r9b, 1 + JNE LBB0_14 + JMP LBB0_15 + +TEXT ยท_bitmap_aligned_or_sse4(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB1_16 + LONG $0x1ff98348 // cmp rcx, 31 + JA LBB1_7 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB1_3: + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB1_5 + +LBB1_4: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f040a42 // or al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x01c38349 // add r11, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB1_4 + +LBB1_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB1_16 + +LBB1_6: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f040a42 // or al, byte [rdi + r11] + LONG 
$0x1a048842 // mov byte [rdx + r11], al + LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1] + LONG $0x1f440a42; BYTE $0x01 // or al, byte [rdi + r11 + 1] + LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al + LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2] + LONG $0x1f440a42; BYTE $0x02 // or al, byte [rdi + r11 + 2] + LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al + LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3] + LONG $0x1f440a42; BYTE $0x03 // or al, byte [rdi + r11 + 3] + LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al + LONG $0x04c38349 // add r11, 4 + WORD $0x394c; BYTE $0xd9 // cmp rcx, r11 + JNE LBB1_6 + JMP LBB1_16 + +LBB1_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd2970f41 // seta r10b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + WORD $0x8441; BYTE $0xda // test r10b, bl + JNE LBB1_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB1_3 + WORD $0x8949; BYTE $0xcb // mov r11, rcx + LONG $0xe0e38349 // and r11, -32 + LONG $0xe0438d49 // lea rax, [r11 - 32] + WORD $0x8949; BYTE $0xc1 // mov r9, rax + LONG $0x05e9c149 // shr r9, 5 + LONG $0x01c18349 // add r9, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB1_10 + WORD $0x894d; BYTE $0xca // mov r10, r9 + LONG $0xfee28349 // and r10, -2 + WORD $0xf749; BYTE $0xda // neg r10 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB1_12: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32] + LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48] + LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32] + WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0 + LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48] + WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1 + LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2 + LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0 + LONG $0x40c08349 // add r8, 64 + LONG $0x02c28349 // add r10, 2 + JNE LBB1_12 + LONG $0x01c1f641 // test r9b, 1 + JE LBB1_15 + +LBB1_14: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + +LBB1_15: + WORD $0x3949; BYTE $0xcb // cmp r11, rcx + JNE LBB1_3 + +LBB1_16: + RET + +LBB1_10: + WORD $0x3145; BYTE $0xc0 // 
xor r8d, r8d + LONG $0x01c1f641 // test r9b, 1 + JNE LBB1_14 + JMP LBB1_15 + +TEXT ยท_bitmap_aligned_and_not_sse4(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB2_16 + LONG $0x1ff98348 // cmp rcx, 31 + JA LBB2_7 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB2_3: + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0xc1f6; BYTE $0x01 // test cl, 1 + JE LBB2_5 + LONG $0x1e048a42 // mov al, byte [rsi + r11] + WORD $0xd0f6 // not al + LONG $0x1f042242 // and al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x01cb8349 // or r11, 1 + +LBB2_5: + WORD $0x0149; BYTE $0xc8 // add r8, rcx + JE LBB2_16 + +LBB2_6: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + WORD $0xd0f6 // not al + LONG $0x1f042242 // and al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1] + WORD $0xd0f6 // not al + LONG $0x1f442242; BYTE $0x01 // and al, byte [rdi + r11 + 1] + LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al + LONG $0x02c38349 // add r11, 2 + WORD $0x394c; BYTE $0xd9 // cmp rcx, r11 + JNE LBB2_6 + JMP LBB2_16 + +LBB2_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd2970f41 // seta r10b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + WORD $0x8441; BYTE $0xda // test r10b, bl + JNE LBB2_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB2_3 + WORD $0x8949; BYTE $0xcb // mov r11, rcx + LONG $0xe0e38349 // and r11, -32 + LONG $0xe0438d49 // lea rax, [r11 - 32] + WORD $0x8949; BYTE $0xc1 // mov r9, rax + LONG $0x05e9c149 // shr r9, 5 + LONG $0x01c18349 // add r9, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB2_10 + WORD $0x894d; BYTE $0xca // mov r10, r9 + LONG $0xfee28349 // and r10, -2 + WORD $0xf749; BYTE $0xda // neg r10 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB2_12: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32] + LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48] + LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32] + WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0 + LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48] + WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1 + LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2 + LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0 + LONG $0x40c08349 // add r8, 64 + LONG $0x02c28349 // add r10, 2 + JNE LBB2_12 + LONG $0x01c1f641 // test r9b, 1 + JE LBB2_15 + +LBB2_14: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, 
oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + +LBB2_15: + WORD $0x3949; BYTE $0xcb // cmp r11, rcx + JNE LBB2_3 + +LBB2_16: + RET + +LBB2_10: + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + LONG $0x01c1f641 // test r9b, 1 + JNE LBB2_14 + JMP LBB2_15 + +TEXT ·_bitmap_aligned_xor_sse4(SB), $0-32 + + MOVQ left+0(FP), DI + MOVQ right+8(FP), SI + MOVQ out+16(FP), DX + MOVQ length+24(FP), CX + + WORD $0x8548; BYTE $0xc9 // test rcx, rcx + JLE LBB3_16 + LONG $0x1ff98348 // cmp rcx, 31 + JA LBB3_7 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB3_3: + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0xf749; BYTE $0xd0 // not r8 + WORD $0x0149; BYTE $0xc8 // add r8, rcx + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x03e18349 // and r9, 3 + JE LBB3_5 + +LBB3_4: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f043242 // xor al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x01c38349 // add r11, 1 + LONG $0xffc18349 // add r9, -1 + JNE LBB3_4 + +LBB3_5: + LONG $0x03f88349 // cmp r8, 3 + JB LBB3_16 + +LBB3_6: + LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11] + LONG $0x1f043242 // xor al, byte [rdi + r11] + LONG $0x1a048842 // mov byte [rdx + r11], al + LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1] + LONG $0x1f443242; BYTE $0x01 // xor al, byte [rdi + r11 + 1] + LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al + LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2] + LONG $0x1f443242; BYTE $0x02 // xor al, byte [rdi + r11 + 2] + LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al + LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3] + LONG $0x1f443242; BYTE $0x03 // xor al, byte [rdi + r11 + 3] + LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al + LONG $0x04c38349 // add r11, 4 + WORD $0x394c; BYTE $0xd9 // cmp rcx, r11 + JNE LBB3_6 + JMP LBB3_16 + +LBB3_7: + LONG $0x0a0c8d4c // lea r9, [rdx + rcx] + LONG $0x0f048d48 // lea rax, [rdi + rcx] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd2970f41 // seta r10b + LONG $0x0e048d48 // lea rax, [rsi + rcx] + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + LONG $0xd0970f41 // seta r8b + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + LONG $0xd1970f41 // seta r9b + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + WORD $0x8441; BYTE $0xda // test r10b, bl + JNE LBB3_3 + WORD $0x2045; BYTE $0xc8 // and r8b, r9b + JNE LBB3_3 + WORD $0x8949; BYTE $0xcb // mov r11, rcx + LONG $0xe0e38349 // and r11, -32 + LONG $0xe0438d49 // lea rax, [r11 - 32] + WORD $0x8949; BYTE $0xc1 // mov r9, rax + LONG $0x05e9c149 // shr r9, 5 + LONG $0x01c18349 // add r9, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB3_10 + WORD $0x894d; BYTE $0xca // mov r10, r9 + LONG $0xfee28349 // and r10, -2 + WORD $0xf749; BYTE $0xda // neg r10 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB3_12: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2,
oword [rsi + r8] + WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32] + LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48] + LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32] + WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0 + LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48] + WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1 + LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2 + LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0 + LONG $0x40c08349 // add r8, 64 + LONG $0x02c28349 // add r10, 2 + JNE LBB3_12 + LONG $0x01c1f641 // test r9b, 1 + JE LBB3_15 + +LBB3_14: + LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8] + LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16] + LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8] + WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0 + LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16] + WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1 + LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2 + LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0 + +LBB3_15: + WORD $0x3949; BYTE $0xcb // cmp r11, rcx + JNE LBB3_3 + +LBB3_16: + RET + +LBB3_10: + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + LONG $0x01c1f641 // test r9b, 1 + JNE LBB3_14 + JMP LBB3_15 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go new file mode 100644 index 00000000..d38ba5d4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go @@ -0,0 +1,747 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutil + +import ( + "bytes" + "errors" + "math/bits" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// BitmapReader is a simple bitmap reader for a byte slice. 
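A minimal sketch of driving this reader, assuming the vendored import path above (the bit pattern is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	// LSB-first packing: 0b10110010 has bits 1, 4, 5 and 7 set.
	bitmap := []byte{0b10110010}
	rdr := bitutil.NewBitmapReader(bitmap, 0, 8)
	for i := 0; i < rdr.Len(); i++ {
		fmt.Printf("bit %d set: %v\n", rdr.Pos(), rdr.Set())
		rdr.Next()
	}
}
```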
+type BitmapReader struct { + bitmap []byte + pos int + len int + + current byte + byteOffset int + bitOffset int +} + +// NewBitmapReader creates and returns a new bitmap reader for the given bitmap +func NewBitmapReader(bitmap []byte, offset, length int) *BitmapReader { + curbyte := byte(0) + if length > 0 && bitmap != nil { + curbyte = bitmap[offset/8] + } + return &BitmapReader{ + bitmap: bitmap, + byteOffset: offset / 8, + bitOffset: offset % 8, + current: curbyte, + len: length, + } +} + +// Set returns true if the current bit is set +func (b *BitmapReader) Set() bool { + return (b.current & (1 << b.bitOffset)) != 0 +} + +// NotSet returns true if the current bit is not set +func (b *BitmapReader) NotSet() bool { + return (b.current & (1 << b.bitOffset)) == 0 +} + +// Next advances the reader to the next bit in the bitmap. +func (b *BitmapReader) Next() { + b.bitOffset++ + b.pos++ + if b.bitOffset == 8 { + b.bitOffset = 0 + b.byteOffset++ + if b.pos < b.len { + b.current = b.bitmap[int(b.byteOffset)] + } + } +} + +// Pos returns the current bit position in the bitmap that the reader is looking at +func (b *BitmapReader) Pos() int { return b.pos } + +// Len returns the total number of bits in the bitmap +func (b *BitmapReader) Len() int { return b.len } + +// BitmapWriter is a simple writer for writing bitmaps to byte slices +type BitmapWriter struct { + buf []byte + pos int + length int + + curByte uint8 + bitMask uint8 + byteOffset int +} + +// NewBitmapWriter returns a sequential bitwise writer that preserves surrounding +// bit values as it writes. +func NewBitmapWriter(bitmap []byte, start, length int) *BitmapWriter { + ret := &BitmapWriter{ + buf: bitmap, + length: length, + byteOffset: start / 8, + bitMask: BitMask[start%8], + } + if length > 0 { + ret.curByte = bitmap[int(ret.byteOffset)] + } + return ret +} + +// Reset resets the position and view of the slice to restart writing a bitmap +// to the same byte slice. +func (b *BitmapWriter) Reset(start, length int) { + b.pos = 0 + b.byteOffset = start / 8 + b.bitMask = BitMask[start%8] + b.length = length + if b.length > 0 { + b.curByte = b.buf[int(b.byteOffset)] + } +} + +func (b *BitmapWriter) Pos() int { return b.pos } +func (b *BitmapWriter) Set() { b.curByte |= b.bitMask } +func (b *BitmapWriter) Clear() { b.curByte &= ^b.bitMask } + +// Next increments the writer to the next bit for writing. +func (b *BitmapWriter) Next() { + b.bitMask = b.bitMask << 1 + b.pos++ + if b.bitMask == 0 { + b.bitMask = 0x01 + b.buf[b.byteOffset] = b.curByte + b.byteOffset++ + if b.pos < b.length { + b.curByte = b.buf[int(b.byteOffset)] + } + } +} + +// AppendBools writes a series of booleans to the bitmapwriter and returns +// the number of remaining bytes left in the buffer for writing. 
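A minimal usage sketch for the writer, assuming the same vendored import path; note that, per the implementation below, AppendBools returns the number of values it actually appended (the minimum of the input length and the remaining capacity), and Finish flushes the final partial byte:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	buf := make([]byte, 2)                    // room for 16 bits
	wr := bitutil.NewBitmapWriter(buf, 3, 10) // start at bit 3, capacity 10 bits
	n := wr.AppendBools([]bool{true, false, true, true})
	wr.Finish() // flush the partially filled byte
	fmt.Printf("appended %d values, bitmap: %08b\n", n, buf)
}
```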
+func (b *BitmapWriter) AppendBools(in []bool) int { + space := min(b.length-b.pos, len(in)) + if space == 0 { + return 0 + } + + bitOffset := bits.TrailingZeros32(uint32(b.bitMask)) + // location that the first byte needs to be written to for appending + appslice := b.buf[int(b.byteOffset) : b.byteOffset+int(BytesForBits(int64(bitOffset+space)))] + // update everything but curByte + appslice[0] = b.curByte + for i, b := range in[:space] { + if b { + SetBit(appslice, i+bitOffset) + } else { + ClearBit(appslice, i+bitOffset) + } + } + + b.pos += space + b.bitMask = BitMask[(bitOffset+space)%8] + b.byteOffset += (bitOffset + space) / 8 + b.curByte = appslice[len(appslice)-1] + + return space +} + +// Finish flushes the final byte out to the byteslice in case it was not already +// on a byte aligned boundary. +func (b *BitmapWriter) Finish() { + if b.length > 0 && (b.bitMask != 0x01 || b.pos < b.length) { + b.buf[int(b.byteOffset)] = b.curByte + } +} + +// BitmapWordReader is a reader for bitmaps that reads a word at a time (a word being an 8 byte uint64) +// and then provides functions to grab the individual trailing bytes after the last word +type BitmapWordReader struct { + bitmap []byte + offset int + nwords int + trailingBits int + trailingBytes int + curword uint64 +} + +// NewBitmapWordReader sets up a word reader, calculates the number of trailing bits and +// number of trailing bytes, along with the number of words. +func NewBitmapWordReader(bitmap []byte, offset, length int) *BitmapWordReader { + bitoffset := offset % 8 + byteOffset := offset / 8 + bm := &BitmapWordReader{ + offset: bitoffset, + bitmap: bitmap[byteOffset : byteOffset+int(BytesForBits(int64(bitoffset+length)))], + // decrement wordcount by 1 as we may touch two adjacent words in one iteration + nwords: length/int(unsafe.Sizeof(uint64(0))*8) - 1, + } + if bm.nwords < 0 { + bm.nwords = 0 + } + bm.trailingBits = length - bm.nwords*int(unsafe.Sizeof(uint64(0)))*8 + bm.trailingBytes = int(BytesForBits(int64(bm.trailingBits))) + + if bm.nwords > 0 { + bm.curword = toFromLEFunc(endian.Native.Uint64(bm.bitmap)) + } else if length > 0 { + setLSB(&bm.curword, bm.bitmap[0]) + } + return bm +} + +// NextWord returns the next full word read from the bitmap, should not be called +// if Words() is 0 as it will step outside of the bounds of the bitmap slice and panic. +// +// We don't perform the bounds checking in order to improve performance. +func (bm *BitmapWordReader) NextWord() uint64 { + bm.bitmap = bm.bitmap[unsafe.Sizeof(bm.curword):] + word := bm.curword + nextWord := toFromLEFunc(endian.Native.Uint64(bm.bitmap)) + if bm.offset != 0 { + // combine two adjacent words into one word + // |<------ next ----->|<---- current ---->| + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // | | offset + // v v + // +-----+-------------+ + // | A | B | + // +-----+-------------+ + // |<------ word ----->| + word >>= uint64(bm.offset) + word |= nextWord << (int64(unsafe.Sizeof(uint64(0))*8) - int64(bm.offset)) + } + bm.curword = nextWord + return word +} + +// NextTrailingByte returns the next trailing byte of the bitmap after the last word +// along with the number of valid bits in that byte. When validBits < 8, that +// is the last byte. +// +// If the bitmap ends on a byte alignment, then the last byte can also return 8 valid bits. +// Thus the TrailingBytes function should be used to know how many trailing bytes to read. 
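A sketch of the usual drain pattern, under the assumption that callers consume whole words before trailing bytes; popcount is a hypothetical helper name, and trailing bytes come back right-aligned, so a plain 8-bit popcount is safe:

```go
package main

import (
	"fmt"
	"math/bits"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

// popcount drains a BitmapWordReader: whole words first, then trailing bytes.
func popcount(bitmap []byte, offset, length int) int {
	rdr := bitutil.NewBitmapWordReader(bitmap, offset, length)
	total := 0
	for n := rdr.Words(); n > 0; n-- {
		total += bits.OnesCount64(rdr.NextWord())
	}
	for n := rdr.TrailingBytes(); n > 0; n-- {
		bt, _ := rdr.NextTrailingByte() // valid bits come back right-aligned
		total += bits.OnesCount8(bt)
	}
	return total
}

func main() {
	bm := []byte{0xFF, 0x01, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F}
	fmt.Println(popcount(bm, 0, 76)) // 62
}
```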
+func (bm *BitmapWordReader) NextTrailingByte() (val byte, validBits int) { + debug.Assert(bm.trailingBits > 0, "next trailing byte called with no trailing bits") + + if bm.trailingBits <= 8 { + // last byte + validBits = bm.trailingBits + bm.trailingBits = 0 + rdr := NewBitmapReader(bm.bitmap, bm.offset, validBits) + for i := 0; i < validBits; i++ { + val >>= 1 + if rdr.Set() { + val |= 0x80 + } + rdr.Next() + } + val >>= (8 - validBits) + return + } + + bm.bitmap = bm.bitmap[1:] + nextByte := bm.bitmap[0] + val = getLSB(bm.curword) + if bm.offset != 0 { + val >>= byte(bm.offset) + val |= nextByte << (8 - bm.offset) + } + setLSB(&bm.curword, nextByte) + bm.trailingBits -= 8 + bm.trailingBytes-- + validBits = 8 + return +} + +func (bm *BitmapWordReader) Words() int { return bm.nwords } +func (bm *BitmapWordReader) TrailingBytes() int { return bm.trailingBytes } + +// BitmapWordWriter is a bitmap writer for writing a full word at a time (a word being +// a uint64). After the last full word is written, PutNextTrailingByte can be used to +// write the remaining trailing bytes. +type BitmapWordWriter struct { + bitmap []byte + offset int + len int + + bitMask uint64 + currentWord uint64 +} + +// NewBitmapWordWriter initializes a new bitmap word writer which will start writing +// into the byte slice at bit offset start, expecting to write len bits. +func NewBitmapWordWriter(bitmap []byte, start, len int) *BitmapWordWriter { + ret := &BitmapWordWriter{ + bitmap: bitmap[start/8:], + len: len, + offset: start % 8, + bitMask: (uint64(1) << uint64(start%8)) - 1, + } + + if ret.offset != 0 { + if ret.len >= int(unsafe.Sizeof(uint64(0))*8) { + ret.currentWord = toFromLEFunc(endian.Native.Uint64(ret.bitmap)) + } else if ret.len > 0 { + setLSB(&ret.currentWord, ret.bitmap[0]) + } + } + return ret +} + +// PutNextWord writes the given word to the bitmap, potentially splitting across +// two adjacent words. +func (bm *BitmapWordWriter) PutNextWord(word uint64) { + sz := int(unsafe.Sizeof(word)) + if bm.offset != 0 { + // split one word into two adjacent words, don't touch unused bits + // |<------ word ----->| + // +-----+-------------+ + // | A | B | + // +-----+-------------+ + // | | + // v v offset + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // |<------ next ----->|<---- current ---->| + word = (word << uint64(bm.offset)) | (word >> (int64(sz*8) - int64(bm.offset))) + next := toFromLEFunc(endian.Native.Uint64(bm.bitmap[sz:])) + bm.currentWord = (bm.currentWord & bm.bitMask) | (word &^ bm.bitMask) + next = (next &^ bm.bitMask) | (word & bm.bitMask) + endian.Native.PutUint64(bm.bitmap, toFromLEFunc(bm.currentWord)) + endian.Native.PutUint64(bm.bitmap[sz:], toFromLEFunc(next)) + bm.currentWord = next + } else { + endian.Native.PutUint64(bm.bitmap, toFromLEFunc(word)) + } + bm.bitmap = bm.bitmap[sz:] +} + +// PutNextTrailingByte writes the number of bits indicated by validBits from b to +// the bitmap. 
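A sketch of how a reader and writer pair up, which is essentially what transferBitmap (further below) does for the unaligned case; copyBits is a hypothetical helper name:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

// copyBits pairs a word reader with a word writer, mirroring the
// unaligned path of transferBitmap.
func copyBits(src []byte, srcOff, length int, dst []byte, dstOff int) {
	rdr := bitutil.NewBitmapWordReader(src, srcOff, length)
	wr := bitutil.NewBitmapWordWriter(dst, dstOff, length)
	for n := rdr.Words(); n > 0; n-- {
		wr.PutNextWord(rdr.NextWord())
	}
	for n := rdr.TrailingBytes(); n > 0; n-- {
		bt, valid := rdr.NextTrailingByte()
		wr.PutNextTrailingByte(bt, valid)
	}
}

func main() {
	src := []byte{0xCA, 0xFE}
	dst := make([]byte, 2)
	copyBits(src, 2, 12, dst, 2) // copy src bits 2..13 into dst bits 2..13
	fmt.Printf("%08b\n", dst)
}
```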
+func (bm *BitmapWordWriter) PutNextTrailingByte(b byte, validBits int) { + curbyte := getLSB(bm.currentWord) + if validBits == 8 { + if bm.offset != 0 { + b = (b << bm.offset) | (b >> (8 - bm.offset)) + next := bm.bitmap[1] + curbyte = (curbyte & byte(bm.bitMask)) | (b &^ byte(bm.bitMask)) + next = (next &^ byte(bm.bitMask)) | (b & byte(bm.bitMask)) + bm.bitmap[0] = curbyte + bm.bitmap[1] = next + bm.currentWord = uint64(next) + } else { + bm.bitmap[0] = b + } + bm.bitmap = bm.bitmap[1:] + } else { + debug.Assert(validBits > 0 && validBits < 8, "invalid valid bits in bitmap word writer") + debug.Assert(BytesForBits(int64(bm.offset+validBits)) <= int64(len(bm.bitmap)), "writing trailing byte outside of bounds of bitmap") + wr := NewBitmapWriter(bm.bitmap, int(bm.offset), validBits) + for i := 0; i < validBits; i++ { + if b&0x01 != 0 { + wr.Set() + } else { + wr.Clear() + } + wr.Next() + b >>= 1 + } + wr.Finish() + } +} + +type transferMode int8 + +const ( + transferCopy transferMode = iota + transferInvert +) + +func transferBitmap(mode transferMode, src []byte, srcOffset, length int, dst []byte, dstOffset int) { + if length == 0 { + // if there's nothing to write, end early. + return + } + + bitOffset := srcOffset % 8 + destBitOffset := dstOffset % 8 + + // slow path, at least one of the bitmaps is not byte aligned. + if bitOffset != 0 || destBitOffset != 0 { + rdr := NewBitmapWordReader(src, srcOffset, length) + wr := NewBitmapWordWriter(dst, dstOffset, length) + + nwords := rdr.Words() + for nwords > 0 { + nwords-- + if mode == transferInvert { + wr.PutNextWord(^rdr.NextWord()) + } else { + wr.PutNextWord(rdr.NextWord()) + } + } + nbytes := rdr.TrailingBytes() + for nbytes > 0 { + nbytes-- + bt, validBits := rdr.NextTrailingByte() + if mode == transferInvert { + bt = ^bt + } + wr.PutNextTrailingByte(bt, validBits) + } + return + } + + // fast path, both are starting with byte-aligned bitmaps + nbytes := int(BytesForBits(int64(length))) + + // shift by its byte offset + src = src[srcOffset/8:] + dst = dst[dstOffset/8:] + + // Take care of the trailing bits in the last byte + // E.g., if trailing_bits = 5, last byte should be + // - low 3 bits: new bits from last byte of data buffer + // - high 5 bits: old bits from last byte of dest buffer + trailingBits := nbytes*8 - length + trailMask := byte(uint(1)<<(8-trailingBits)) - 1 + var lastData byte + if mode == transferInvert { + for i, b := range src[:nbytes-1] { + dst[i] = ^b + } + lastData = ^src[nbytes-1] + } else { + copy(dst, src[:nbytes-1]) + lastData = src[nbytes-1] + } + + dst[nbytes-1] &= ^trailMask + dst[nbytes-1] |= lastData & trailMask +} + +// CopyBitmap copies the bitmap indicated by src, starting at bit offset srcOffset, +// and copying length bits into dst, starting at bit offset dstOffset. +func CopyBitmap(src []byte, srcOffset, length int, dst []byte, dstOffset int) { + transferBitmap(transferCopy, src, srcOffset, length, dst, dstOffset) +} + +// InvertBitmap copies a bit range of a bitmap, inverting it as it copies + over into the destination.
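A short usage sketch for the two exported entry points, with arbitrary offsets and bit patterns:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	src := []byte{0b11001010, 0b00000111}
	dst := make([]byte, 2)

	// copy 10 bits starting at src bit 3 into dst starting at bit 1
	bitutil.CopyBitmap(src, 3, 10, dst, 1)
	fmt.Printf("copied:   %08b\n", dst)

	// same range, bitwise inverted
	bitutil.InvertBitmap(src, 3, 10, dst, 1)
	fmt.Printf("inverted: %08b\n", dst)
}
```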
+func InvertBitmap(src []byte, srcOffset, length int, dst []byte, dstOffset int) { + transferBitmap(transferInvert, src, srcOffset, length, dst, dstOffset) +} + +type bitOp struct { + opWord func(uint64, uint64) uint64 + opByte func(byte, byte) byte + opAligned func(l, r, o []byte) +} + +var ( + bitAndOp = bitOp{ + opWord: func(l, r uint64) uint64 { return l & r }, + opByte: func(l, r byte) byte { return l & r }, + } + bitOrOp = bitOp{ + opWord: func(l, r uint64) uint64 { return l | r }, + opByte: func(l, r byte) byte { return l | r }, + } + bitAndNotOp = bitOp{ + opWord: func(l, r uint64) uint64 { return l &^ r }, + opByte: func(l, r byte) byte { return l &^ r }, + } + bitXorOp = bitOp{ + opWord: func(l, r uint64) uint64 { return l ^ r }, + opByte: func(l, r byte) byte { return l ^ r }, + } +) + +func alignedBitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + debug.Assert(lOffset%8 == rOffset%8, "aligned bitmap op called with unaligned offsets") + debug.Assert(lOffset%8 == outOffset%8, "aligned bitmap op called with unaligned output offset") + + nbytes := BytesForBits(length + lOffset%8) + left = left[lOffset/8:] + right = right[rOffset/8:] + out = out[outOffset/8:] + endMask := (lOffset + length) % 8 + switch nbytes { + case 0: + return + case 1: // everything within a single byte + // (length+lOffset%8) <= 8 + mask := PrecedingBitmask[lOffset%8] + if endMask != 0 { + mask |= TrailingBitmask[(lOffset+length)%8] + } + out[0] = (out[0] & mask) | (op.opByte(left[0], right[0]) &^ mask) + case 2: // don't send zero length to opAligned + firstByteMask := PrecedingBitmask[lOffset%8] + out[0] = (out[0] & firstByteMask) | (op.opByte(left[0], right[0]) &^ firstByteMask) + lastByteMask := byte(0) + if endMask != 0 { + lastByteMask = TrailingBitmask[(lOffset+length)%8] + } + out[1] = (out[1] & lastByteMask) | (op.opByte(left[1], right[1]) &^ lastByteMask) + default: + firstByteMask := PrecedingBitmask[lOffset%8] + out[0] = (out[0] & firstByteMask) | (op.opByte(left[0], right[0]) &^ firstByteMask) + + op.opAligned(left[1:nbytes-1], right[1:nbytes-1], out[1:nbytes-1]) + + lastByteMask := byte(0) + if endMask != 0 { + lastByteMask = TrailingBitmask[(lOffset+length)%8] + } + out[nbytes-1] = (out[nbytes-1] & lastByteMask) | (op.opByte(left[nbytes-1], right[nbytes-1]) &^ lastByteMask) + } +} + +func unalignedBitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + leftRdr := NewBitmapWordReader(left, int(lOffset), int(length)) + rightRdr := NewBitmapWordReader(right, int(rOffset), int(length)) + writer := NewBitmapWordWriter(out, int(outOffset), int(length)) + + for nwords := leftRdr.Words(); nwords > 0; nwords-- { + writer.PutNextWord(op.opWord(leftRdr.NextWord(), rightRdr.NextWord())) + } + for nbytes := leftRdr.TrailingBytes(); nbytes > 0; nbytes-- { + leftByte, leftValid := leftRdr.NextTrailingByte() + rightByte, rightValid := rightRdr.NextTrailingByte() + debug.Assert(leftValid == rightValid, "unexpected mismatch of valid bits") + writer.PutNextTrailingByte(op.opByte(leftByte, rightByte), leftValid) + } +} + +func BitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset, length int64) { + if (outOffset%8 == lOffset%8) && (outOffset%8 == rOffset%8) { + // fast case!
+ alignedBitmapOp(op, left, right, lOffset, rOffset, out, outOffset, length) + } else { + unalignedBitmapOp(op, left, right, lOffset, rOffset, out, outOffset, length) + } +} + +func BitmapOpAlloc(mem memory.Allocator, op bitOp, left, right []byte, lOffset, rOffset int64, length int64, outOffset int64) *memory.Buffer { + bits := length + outOffset + buf := memory.NewResizableBuffer(mem) + buf.Resize(int(BytesForBits(bits))) + BitmapOp(op, left, right, lOffset, rOffset, buf.Bytes(), outOffset, length) + return buf +} + +func BitmapAnd(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + BitmapOp(bitAndOp, left, right, lOffset, rOffset, out, outOffset, length) +} + +func BitmapOr(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + BitmapOp(bitOrOp, left, right, lOffset, rOffset, out, outOffset, length) +} + +func BitmapAndAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer { + return BitmapOpAlloc(mem, bitAndOp, left, right, lOffset, rOffset, length, outOffset) +} + +func BitmapOrAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer { + return BitmapOpAlloc(mem, bitOrOp, left, right, lOffset, rOffset, length, outOffset) +} + +func BitmapAndNot(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + BitmapOp(bitAndNotOp, left, right, lOffset, rOffset, out, outOffset, length) +} + +func BitmapAndNotAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer { + return BitmapOpAlloc(mem, bitAndNotOp, left, right, lOffset, rOffset, length, outOffset) +} + +func BitmapXor(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) { + BitmapOp(bitXorOp, left, right, lOffset, rOffset, out, outOffset, length) +} + +func BitmapXorAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer { + return BitmapOpAlloc(mem, bitXorOp, left, right, lOffset, rOffset, length, outOffset) +} + +func BitmapEquals(left, right []byte, lOffset, rOffset int64, length int64) bool { + if lOffset%8 == 0 && rOffset%8 == 0 { + // byte aligned, fast path, can use bytes.Equal (memcmp) + byteLen := length / 8 + lStart := lOffset / 8 + rStart := rOffset / 8 + if !bytes.Equal(left[lStart:lStart+byteLen], right[rStart:rStart+byteLen]) { + return false + } + + // check trailing bits + for i := (length / 8) * 8; i < length; i++ { + if BitIsSet(left, int(lOffset+i)) != BitIsSet(right, int(rOffset+i)) { + return false + } + } + return true + } + + lrdr := NewBitmapWordReader(left, int(lOffset), int(length)) + rrdr := NewBitmapWordReader(right, int(rOffset), int(length)) + + nwords := lrdr.Words() + for nwords > 0 { + nwords-- + if lrdr.NextWord() != rrdr.NextWord() { + return false + } + } + + nbytes := lrdr.TrailingBytes() + for nbytes > 0 { + nbytes-- + lbt, _ := lrdr.NextTrailingByte() + rbt, _ := rrdr.NextTrailingByte() + if lbt != rbt { + return false + } + } + return true +} + +// OptionalBitIndexer is a convenience wrapper for getting bits from +// a bitmap which may or may not be nil. 
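A brief sketch of the nil-tolerant semantics: a nil bitmap reads as all-set, matching the Arrow convention that a missing validity bitmap means all values are valid:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	var idx bitutil.OptionalBitIndexer // nil bitmap: everything reads as set
	fmt.Println(idx.GetBit(5))         // true

	idx = bitutil.OptionalBitIndexer{Bitmap: []byte{0b00000010}, Offset: 0}
	fmt.Println(idx.GetBit(0), idx.GetBit(1)) // false true
}
```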
+type OptionalBitIndexer struct { + Bitmap []byte + Offset int +} + +func (b *OptionalBitIndexer) GetBit(i int) bool { + return b.Bitmap == nil || BitIsSet(b.Bitmap, b.Offset+i) +} + +type Bitmap struct { + Data []byte + Offset, Len int64 +} + +func bitLength(bitmaps []Bitmap) (int64, error) { + for _, b := range bitmaps[1:] { + if b.Len != bitmaps[0].Len { + return -1, errors.New("bitmaps must be same length") + } + } + return bitmaps[0].Len, nil +} + +func runVisitWordsAndWriteLoop(bitLen int64, rdrs []*BitmapWordReader, wrs []*BitmapWordWriter, visitor func(in, out []uint64)) { + const bitWidth int64 = int64(uint64SizeBits) + + visited := make([]uint64, len(rdrs)) + output := make([]uint64, len(wrs)) + + // every reader will have the same number of words, since they are the + same length. This will be inefficient in some cases. When there are + offsets beyond the Word boundary, every word would have to be + created from 2 adjoining words + nwords := int64(rdrs[0].Words()) + bitLen -= nwords * bitWidth + for nwords > 0 { + nwords-- + for i := range visited { + visited[i] = rdrs[i].NextWord() + } + visitor(visited, output) + for i := range output { + wrs[i].PutNextWord(output[i]) + } + } + + // every reader will have the same number of trailing bytes, because + we already confirmed they have the same length. Because + offsets beyond the Word boundary can cause adjoining words, the + trailing portion could be more than one word of remaining full/partial + words to write. + if bitLen == 0 { + return + } + + // convert the word visitor to a byte visitor + byteVisitor := func(in, out []byte) { + for i, w := range in { + visited[i] = uint64(w) + } + visitor(visited, output) + for i, w := range output { + out[i] = byte(w) + } + } + + visitedBytes := make([]byte, len(rdrs)) + outputBytes := make([]byte, len(wrs)) + nbytes := rdrs[0].trailingBytes + for nbytes > 0 { + nbytes-- + memory.Set(visitedBytes, 0) + memory.Set(outputBytes, 0) + + var validBits int + for i := range rdrs { + visitedBytes[i], validBits = rdrs[i].NextTrailingByte() + } + byteVisitor(visitedBytes, outputBytes) + for i, w := range outputBytes { + wrs[i].PutNextTrailingByte(w, validBits) + } + } +} + +// VisitWordsAndWrite visits words of bits from each input bitmap and +// collects outputs to a slice of output Bitmaps. +// +// All bitmaps must have identical lengths. The first bit in a visited +// bitmap may be offset within the first visited word, but words will +// otherwise contain densely packed bits loaded from the bitmap. That +// offset within the first word is handled internally. +// +// NOTE: this function is efficient on 3+ sufficiently large bitmaps. +// It also has a large prolog/epilog overhead and should be used +// carefully in other cases. For 2 or fewer bitmaps, and/or smaller + bitmaps, try BitmapReader and/or other utilities.
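A sketch of calling this with two inputs and one output, assuming byte-aligned 16-bit bitmaps; the visitor here happens to compute AND-NOT, but any word-wise combination works:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	a := []byte{0b10101010, 0b11110000}
	b := []byte{0b11001100, 0b10111101}
	out := make([]byte, 2)

	args := []bitutil.Bitmap{{Data: a, Len: 16}, {Data: b, Len: 16}}
	res := []bitutil.Bitmap{{Data: out, Len: 16}}

	err := bitutil.VisitWordsAndWrite(args, res, func(in, out []uint64) {
		out[0] = in[0] &^ in[1] // AND-NOT, one word at a time
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%08b\n", out)
}
```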
+func VisitWordsAndWrite(args []Bitmap, out []Bitmap, visitor func(in, out []uint64)) error { + bitLen, err := bitLength(args) + if err != nil { + return err + } + + rdrs, wrs := make([]*BitmapWordReader, len(args)), make([]*BitmapWordWriter, len(out)) + for i, in := range args { + rdrs[i] = NewBitmapWordReader(in.Data, int(in.Offset), int(in.Len)) + } + for i, o := range out { + wrs[i] = NewBitmapWordWriter(o.Data, int(o.Offset), int(o.Len)) + } + runVisitWordsAndWriteLoop(bitLen, rdrs, wrs, visitor) + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go new file mode 100644 index 00000000..8c9e97cd --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go @@ -0,0 +1,220 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutil + +import ( + "math" + "math/bits" + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/memory" +) + +var ( + BitMask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128} + FlippedBitMask = [8]byte{254, 253, 251, 247, 239, 223, 191, 127} +) + +// IsMultipleOf8 returns whether v is a multiple of 8. +func IsMultipleOf8(v int64) bool { return v&7 == 0 } + +// IsMultipleOf64 returns whether v is a multiple of 64 +func IsMultipleOf64(v int64) bool { return v&63 == 0 } + +func BytesForBits(bits int64) int64 { return (bits + 7) >> 3 } + +// NextPowerOf2 rounds x to the next power of two. +func NextPowerOf2(x int) int { return 1 << uint(bits.Len(uint(x))) } + +// CeilByte rounds size to the next multiple of 8. +func CeilByte(size int) int { return (size + 7) &^ 7 } + +// CeilByte64 rounds size to the next multiple of 8. +func CeilByte64(size int64) int64 { return (size + 7) &^ 7 } + +// BitIsSet returns true if the bit at index i in buf is set (1). +func BitIsSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) != 0 } + +// BitIsNotSet returns true if the bit at index i in buf is not set (0). +func BitIsNotSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) == 0 } + +// SetBit sets the bit at index i in buf to 1. +func SetBit(buf []byte, i int) { buf[uint(i)/8] |= BitMask[byte(i)%8] } + +// ClearBit sets the bit at index i in buf to 0. +func ClearBit(buf []byte, i int) { buf[uint(i)/8] &= FlippedBitMask[byte(i)%8] } + +// SetBitTo sets the bit at index i in buf to val. +func SetBitTo(buf []byte, i int, val bool) { + if val { + SetBit(buf, i) + } else { + ClearBit(buf, i) + } +} + +// CountSetBits counts the number of 1's in buf up to n bits. 
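A usage sketch (values arbitrary):

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	buf := []byte{0xF0, 0x0F, 0xAA}
	fmt.Println(bitutil.CountSetBits(buf, 0, 24)) // all 24 bits: 12
	fmt.Println(bitutil.CountSetBits(buf, 4, 8))  // bits 4..11: 8
}
```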
+func CountSetBits(buf []byte, offset, n int) int { + if offset > 0 { + return countSetBitsWithOffset(buf, offset, n) + } + + count := 0 + + uint64Bytes := n / uint64SizeBits * 8 + for _, v := range bytesToUint64(buf[:uint64Bytes]) { + count += bits.OnesCount64(v) + } + + for _, v := range buf[uint64Bytes : n/8] { + count += bits.OnesCount8(v) + } + + // tail bits + for i := n &^ 0x7; i < n; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + return count +} + +func countSetBitsWithOffset(buf []byte, offset, n int) int { + count := 0 + + beg := offset + end := offset + n + + begU8 := roundUp(beg, uint64SizeBits) + + init := min(n, begU8-beg) + for i := offset; i < beg+init; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + nU64 := (n - init) / uint64SizeBits + begU64 := begU8 / uint64SizeBits + endU64 := begU64 + nU64 + bufU64 := bytesToUint64(buf) + if begU64 < len(bufU64) { + for _, v := range bufU64[begU64:endU64] { + count += bits.OnesCount64(v) + } + } + + // FIXME: use a fallback to bits.OnesCount8 + // before counting the tail bits. + + tail := beg + init + nU64*uint64SizeBits + for i := tail; i < end; i++ { + if BitIsSet(buf, i) { + count++ + } + } + + return count +} + +func roundUp(v, f int) int { + return (v + (f - 1)) / f * f +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +const ( + uint64SizeBytes = int(unsafe.Sizeof(uint64(0))) + uint64SizeBits = uint64SizeBytes * 8 +) + +func bytesToUint64(b []byte) []uint64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / uint64SizeBytes + s.Cap = h.Cap / uint64SizeBytes + + return res +} + +var ( + // PrecedingBitmask is a convenience set of values as bitmasks for checking + // prefix bits of a byte + PrecedingBitmask = [8]byte{0, 1, 3, 7, 15, 31, 63, 127} + // TrailingBitmask is the bitwise complement version of kPrecedingBitmask + TrailingBitmask = [8]byte{255, 254, 252, 248, 240, 224, 192, 128} +) + +// SetBitsTo is a convenience function to quickly set or unset all the bits +// in a bitmap starting at startOffset for length bits. 
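A usage sketch (offsets and lengths arbitrary):

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/bitutil"
)

func main() {
	bm := make([]byte, 3)
	bitutil.SetBitsTo(bm, 5, 12, true) // set bits 5..16
	fmt.Printf("%08b\n", bm)

	bitutil.SetBitsTo(bm, 8, 4, false) // clear bits 8..11 again
	fmt.Printf("%08b\n", bm)
}
```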
+func SetBitsTo(bits []byte, startOffset, length int64, areSet bool) { + if length == 0 { + return + } + + beg := startOffset + end := startOffset + length + var fill uint8 = 0 + if areSet { + fill = math.MaxUint8 + } + + byteBeg := beg / 8 + byteEnd := end/8 + 1 + + // don't modify bits before the startOffset by using this mask + firstByteMask := PrecedingBitmask[beg%8] + // don't modify bits past the length by using this mask + lastByteMask := TrailingBitmask[end%8] + + if byteEnd == byteBeg+1 { + // set bits within a single byte + onlyByteMask := firstByteMask + if end%8 != 0 { + onlyByteMask = firstByteMask | lastByteMask + } + + bits[byteBeg] &= onlyByteMask + bits[byteBeg] |= fill &^ onlyByteMask + return + } + + // set/clear trailing bits of first byte + bits[byteBeg] &= firstByteMask + bits[byteBeg] |= fill &^ firstByteMask + + if byteEnd-byteBeg > 2 { + memory.Set(bits[byteBeg+1:byteEnd-1], fill) + } + + if end%8 == 0 { + return + } + + bits[byteEnd-1] &= lastByteMask + bits[byteEnd-1] |= fill &^ lastByteMask +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_default.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_default.go new file mode 100644 index 00000000..9f5d3cdc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_default.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !s390x + +package bitutil + +import ( + "unsafe" +) + +var toFromLEFunc = func(in uint64) uint64 { return in } + +func getLSB(v uint64) byte { + return (*[8]byte)(unsafe.Pointer(&v))[0] +} + +func setLSB(v *uint64, b byte) { + (*[8]byte)(unsafe.Pointer(v))[0] = b +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_s390x.go b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_s390x.go new file mode 100644 index 00000000..a9bba439 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_s390x.go @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bitutil + +import ( + "math/bits" + "unsafe" +) + +var toFromLEFunc = bits.ReverseBytes64 + +func getLSB(v uint64) byte { + return (*[8]byte)(unsafe.Pointer(&v))[7] +} + +func setLSB(v *uint64, b byte) { + (*[8]byte)(unsafe.Pointer(v))[7] = b +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compare.go b/vendor/github.com/apache/arrow/go/v12/arrow/compare.go new file mode 100644 index 00000000..04f9b339 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/compare.go @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "reflect" +) + +type typeEqualsConfig struct { + metadata bool +} + +// TypeEqualOption is a functional option type used for configuring type +// equality checks. +type TypeEqualOption func(*typeEqualsConfig) + +// CheckMetadata is an option for TypeEqual that allows checking for metadata +// equality besides type equality. It only makes sense for STRUCT type. +func CheckMetadata() TypeEqualOption { + return func(cfg *typeEqualsConfig) { + cfg.metadata = true + } +} + +// TypeEqual checks if two DataType are the same, optionally checking metadata +// equality for STRUCT types. +func TypeEqual(left, right DataType, opts ...TypeEqualOption) bool { + var cfg typeEqualsConfig + for _, opt := range opts { + opt(&cfg) + } + + switch { + case left == nil || right == nil: + return left == nil && right == nil + case left.ID() != right.ID(): + return false + } + + switch l := left.(type) { + case ExtensionType: + return l.ExtensionEquals(right.(ExtensionType)) + case *ListType: + if !TypeEqual(l.Elem(), right.(*ListType).Elem(), opts...) { + return false + } + if cfg.metadata { + return l.elem.Metadata.Equal(right.(*ListType).elem.Metadata) + } + return l.elem.Nullable == right.(*ListType).elem.Nullable + case *FixedSizeListType: + if !TypeEqual(l.Elem(), right.(*FixedSizeListType).Elem(), opts...) 
{ + return false + } + if cfg.metadata { + return l.elem.Metadata.Equal(right.(*FixedSizeListType).elem.Metadata) + } + return l.n == right.(*FixedSizeListType).n && l.elem.Nullable == right.(*FixedSizeListType).elem.Nullable + case *StructType: + r := right.(*StructType) + switch { + case len(l.fields) != len(r.fields): + return false + case !reflect.DeepEqual(l.index, r.index): + return false + } + for i := range l.fields { + leftField, rightField := l.fields[i], r.fields[i] + switch { + case leftField.Name != rightField.Name: + return false + case leftField.Nullable != rightField.Nullable: + return false + case !TypeEqual(leftField.Type, rightField.Type, opts...): + return false + case cfg.metadata && !leftField.Metadata.Equal(rightField.Metadata): + return false + } + } + return true + case UnionType: + r := right.(UnionType) + if l.Mode() != r.Mode() { + return false + } + + if !reflect.DeepEqual(l.ChildIDs(), r.ChildIDs()) { + return false + } + + for i := range l.Fields() { + leftField, rightField := l.Fields()[i], r.Fields()[i] + switch { + case leftField.Name != rightField.Name: + return false + case leftField.Nullable != rightField.Nullable: + return false + case !TypeEqual(leftField.Type, rightField.Type, opts...): + return false + case cfg.metadata && !leftField.Metadata.Equal(rightField.Metadata): + return false + case l.TypeCodes()[i] != r.TypeCodes()[i]: + return false + } + } + return true + case *TimestampType: + r := right.(*TimestampType) + return l.Unit == r.Unit && l.TimeZone == r.TimeZone + case *RunEndEncodedType: + r := right.(*RunEndEncodedType) + return TypeEqual(l.Encoded(), r.Encoded(), opts...) && + TypeEqual(l.runEnds, r.runEnds, opts...) + default: + return reflect.DeepEqual(left, right) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype.go new file mode 100644 index 00000000..8f9ad2e2 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype.go @@ -0,0 +1,391 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "hash/maphash" + "strings" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +// Type is a logical type. They can be expressed as +// either a primitive physical type (bytes or bits of some fixed size), a +// nested type consisting of other data types, or another data type (e.g. 
a +// timestamp encoded as an int64) +type Type int + +const ( + // NULL type having no physical storage + NULL Type = iota + + // BOOL is a 1 bit, LSB bit-packed ordering + BOOL + + // UINT8 is an Unsigned 8-bit little-endian integer + UINT8 + + // INT8 is a Signed 8-bit little-endian integer + INT8 + + // UINT16 is an Unsigned 16-bit little-endian integer + UINT16 + + // INT16 is a Signed 16-bit little-endian integer + INT16 + + // UINT32 is an Unsigned 32-bit little-endian integer + UINT32 + + // INT32 is a Signed 32-bit little-endian integer + INT32 + + // UINT64 is an Unsigned 64-bit little-endian integer + UINT64 + + // INT64 is a Signed 64-bit little-endian integer + INT64 + + // FLOAT16 is a 2-byte floating point value + FLOAT16 + + // FLOAT32 is a 4-byte floating point value + FLOAT32 + + // FLOAT64 is an 8-byte floating point value + FLOAT64 + + // STRING is a UTF8 variable-length string + STRING + + // BINARY is a Variable-length byte type (no guarantee of UTF8-ness) + BINARY + + // FIXED_SIZE_BINARY is a binary where each value occupies the same number of bytes + FIXED_SIZE_BINARY + + // DATE32 is int32 days since the UNIX epoch + DATE32 + + // DATE64 is int64 milliseconds since the UNIX epoch + DATE64 + + // TIMESTAMP is an exact timestamp encoded with int64 since UNIX epoch + // Default unit millisecond + TIMESTAMP + + // TIME32 is a signed 32-bit integer, representing either seconds or + // milliseconds since midnight + TIME32 + + // TIME64 is a signed 64-bit integer, representing either microseconds or + // nanoseconds since midnight + TIME64 + + // INTERVAL_MONTHS is YEAR_MONTH interval in SQL style + INTERVAL_MONTHS + + // INTERVAL_DAY_TIME is DAY_TIME in SQL Style + INTERVAL_DAY_TIME + + // DECIMAL128 is a precision- and scale-based decimal type. Storage type depends on the + // parameters. + DECIMAL128 + + // DECIMAL256 is a precision and scale based decimal type, with 256 bit max. not yet implemented + DECIMAL256 + + // LIST is a list of some logical data type + LIST + + // STRUCT of logical types + STRUCT + + // SPARSE_UNION of logical types. not yet implemented + SPARSE_UNION + + // DENSE_UNION of logical types. not yet implemented + DENSE_UNION + + // DICTIONARY aka Category type + DICTIONARY + + // MAP is a repeated struct logical type + MAP + + // Custom data type, implemented by user + EXTENSION + + // Fixed size list of some logical type + FIXED_SIZE_LIST + + // Measure of elapsed time in either seconds, milliseconds, microseconds + // or nanoseconds. + DURATION + + // like STRING, but 64-bit offsets. not yet implemented + LARGE_STRING + + // like BINARY but with 64-bit offsets, not yet implemented + LARGE_BINARY + + // like LIST but with 64-bit offsets. not yet implmented + LARGE_LIST + + // calendar interval with three fields + INTERVAL_MONTH_DAY_NANO + + RUN_END_ENCODED + + // Alias to ensure we do not break any consumers + DECIMAL = DECIMAL128 +) + +// DataType is the representation of an Arrow type. +type DataType interface { + fmt.Stringer + ID() Type + // Name is name of the data type. 
+ Name() string + Fingerprint() string + Layout() DataTypeLayout +} + +// TypesToString is a convenience function to create a list of types +// which are comma delimited as a string +func TypesToString(types []DataType) string { + var b strings.Builder + b.WriteByte('(') + for i, t := range types { + if i != 0 { + b.WriteString(", ") + } + b.WriteString(t.String()) + } + b.WriteByte(')') + return b.String() +} + +// FixedWidthDataType is the representation of an Arrow type that +// requires a fixed number of bits in memory for each element. +type FixedWidthDataType interface { + DataType + // BitWidth returns the number of bits required to store a single element of this data type in memory. + BitWidth() int + // Bytes returns the number of bytes required to store a single element of this data type in memory. + Bytes() int +} + +type BinaryDataType interface { + DataType + IsUtf8() bool + binary() +} + +type OffsetsDataType interface { + DataType + OffsetTypeTraits() OffsetTraits +} + +func HashType(seed maphash.Seed, dt DataType) uint64 { + var h maphash.Hash + h.SetSeed(seed) + h.WriteString(dt.Fingerprint()) + return h.Sum64() +} + +func typeIDFingerprint(id Type) string { + c := string(rune(int(id) + int('A'))) + return "@" + c +} + +func typeFingerprint(typ DataType) string { return typeIDFingerprint(typ.ID()) } + +func timeUnitFingerprint(unit TimeUnit) rune { + switch unit { + case Second: + return 's' + case Millisecond: + return 'm' + case Microsecond: + return 'u' + case Nanosecond: + return 'n' + default: + debug.Assert(false, "unexpected time unit") + return rune(0) + } +} + +// BufferKind describes the type of buffer expected when defining a layout specification +type BufferKind int8 + +// The expected types of buffers +const ( + KindFixedWidth BufferKind = iota + KindVarWidth + KindBitmap + KindAlwaysNull +) + +// BufferSpec provides a specification for the buffers of a particular datatype +type BufferSpec struct { + Kind BufferKind + ByteWidth int // for KindFixedWidth +} + +func (b BufferSpec) Equals(other BufferSpec) bool { + return b.Kind == other.Kind && (b.Kind != KindFixedWidth || b.ByteWidth == other.ByteWidth) +} + +// DataTypeLayout represents the physical layout of a datatype's buffers including +// the number of and types of those binary buffers. This will correspond +// with the buffers in the ArrayData for an array of that type. +type DataTypeLayout struct { + Buffers []BufferSpec + HasDict bool +} + +func SpecFixedWidth(w int) BufferSpec { return BufferSpec{KindFixedWidth, w} } +func SpecVariableWidth() BufferSpec { return BufferSpec{KindVarWidth, -1} } +func SpecBitmap() BufferSpec { return BufferSpec{KindBitmap, -1} } +func SpecAlwaysNull() BufferSpec { return BufferSpec{KindAlwaysNull, -1} } + +// IsInteger is a helper to return true if the type ID provided is one of the +// integral types of uint or int with the varying sizes. 
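A brief sketch of the predicate helpers defined below, assuming the vendored arrow import path:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
)

func main() {
	fmt.Println(arrow.IsInteger(arrow.UINT16)) // true
	fmt.Println(arrow.IsFloating(arrow.INT64)) // false
	fmt.Println(arrow.IsNested(arrow.STRUCT))  // true
}
```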
+func IsInteger(t Type) bool { + switch t { + case UINT8, INT8, UINT16, INT16, UINT32, INT32, UINT64, INT64: + return true + } + return false +} + +// IsUnsignedInteger is a helper that returns true if the type ID provided is +// one of the uint integral types (uint8, uint16, uint32, uint64) +func IsUnsignedInteger(t Type) bool { + switch t { + case UINT8, UINT16, UINT32, UINT64: + return true + } + return false +} + +// IsSignedInteger is a helper that returns true if the type ID provided is +// one of the int integral types (int8, int16, int32, int64) +func IsSignedInteger(t Type) bool { + switch t { + case INT8, INT16, INT32, INT64: + return true + } + return false +} + +// IsFloating is a helper that returns true if the type ID provided is +// one of Float16, Float32, or Float64 +func IsFloating(t Type) bool { + switch t { + case FLOAT16, FLOAT32, FLOAT64: + return true + } + return false +} + +// IsPrimitive returns true if the provided type ID represents a fixed width +// primitive type. +func IsPrimitive(t Type) bool { + switch t { + case BOOL, UINT8, INT8, UINT16, INT16, UINT32, INT32, UINT64, INT64, + FLOAT16, FLOAT32, FLOAT64, DATE32, DATE64, TIME32, TIME64, TIMESTAMP, + DURATION, INTERVAL_MONTHS, INTERVAL_DAY_TIME, INTERVAL_MONTH_DAY_NANO: + return true + } + return false +} + +// IsBaseBinary returns true for Binary/String and their LARGE variants +func IsBaseBinary(t Type) bool { + switch t { + case BINARY, STRING, LARGE_BINARY, LARGE_STRING: + return true + } + return false +} + +// IsBinaryLike returns true for only BINARY and STRING +func IsBinaryLike(t Type) bool { + switch t { + case BINARY, STRING: + return true + } + return false +} + +// IsLargeBinaryLike returns true for only LARGE_BINARY and LARGE_STRING +func IsLargeBinaryLike(t Type) bool { + switch t { + case LARGE_BINARY, LARGE_STRING: + return true + } + return false +} + +// IsFixedSizeBinary returns true for Decimal128/256 and FixedSizeBinary +func IsFixedSizeBinary(t Type) bool { + switch t { + case DECIMAL128, DECIMAL256, FIXED_SIZE_BINARY: + return true + } + return false +} + +// IsDecimal returns true for Decimal128 and Decimal256 +func IsDecimal(t Type) bool { + switch t { + case DECIMAL128, DECIMAL256: + return true + } + return false +} + +// IsUnion returns true for Sparse and Dense Unions +func IsUnion(t Type) bool { + switch t { + case DENSE_UNION, SPARSE_UNION: + return true + } + return false +} + +// IsListLike returns true for List, LargeList, FixedSizeList, and Map +func IsListLike(t Type) bool { + switch t { + case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP: + return true + } + return false +} + +// IsNested returns true for List, LargeList, FixedSizeList, Map, Struct, and Unions +func IsNested(t Type) bool { + switch t { + case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP, STRUCT, SPARSE_UNION, DENSE_UNION: + return true + } + return false +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_binary.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_binary.go new file mode 100644 index 00000000..a3a85686 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_binary.go @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +// OffsetTraits is a convenient interface over the various type traits +// constants such as arrow.Int32Traits allowing types with offsets, like +// BinaryType, StringType, LargeBinaryType and LargeStringType to have +// a method to return information about their offset type and how many bytes +// would be required to allocate an offset buffer for them. +type OffsetTraits interface { + // BytesRequired returns the number of bytes required to be allocated + // in order to hold the passed in number of elements of this type. + BytesRequired(int) int +} + +type BinaryType struct{} + +func (t *BinaryType) ID() Type { return BINARY } +func (t *BinaryType) Name() string { return "binary" } +func (t *BinaryType) String() string { return "binary" } +func (t *BinaryType) binary() {} +func (t *BinaryType) Fingerprint() string { return typeFingerprint(t) } +func (t *BinaryType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), + SpecFixedWidth(Int32SizeBytes), SpecVariableWidth()}} +} +func (t *BinaryType) OffsetTypeTraits() OffsetTraits { return Int32Traits } +func (BinaryType) IsUtf8() bool { return false } + +type StringType struct{} + +func (t *StringType) ID() Type { return STRING } +func (t *StringType) Name() string { return "utf8" } +func (t *StringType) String() string { return "utf8" } +func (t *StringType) binary() {} +func (t *StringType) Fingerprint() string { return typeFingerprint(t) } +func (t *StringType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), + SpecFixedWidth(Int32SizeBytes), SpecVariableWidth()}} +} +func (t *StringType) OffsetTypeTraits() OffsetTraits { return Int32Traits } +func (StringType) IsUtf8() bool { return true } + +type LargeBinaryType struct{} + +func (t *LargeBinaryType) ID() Type { return LARGE_BINARY } +func (t *LargeBinaryType) Name() string { return "large_binary" } +func (t *LargeBinaryType) String() string { return "large_binary" } +func (t *LargeBinaryType) binary() {} +func (t *LargeBinaryType) Fingerprint() string { return typeFingerprint(t) } +func (t *LargeBinaryType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), + SpecFixedWidth(Int64SizeBytes), SpecVariableWidth()}} +} +func (t *LargeBinaryType) OffsetTypeTraits() OffsetTraits { return Int64Traits } +func (LargeBinaryType) IsUtf8() bool { return false } + +type LargeStringType struct{} + +func (t *LargeStringType) ID() Type { return LARGE_STRING } +func (t *LargeStringType) Name() string { return "large_utf8" } +func (t *LargeStringType) String() string { return "large_utf8" } +func (t *LargeStringType) binary() {} +func (t *LargeStringType) Fingerprint() string { return typeFingerprint(t) } +func (t *LargeStringType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), + SpecFixedWidth(Int64SizeBytes), SpecVariableWidth()}} +} +func (t *LargeStringType) OffsetTypeTraits() OffsetTraits { return Int64Traits } +func (LargeStringType) IsUtf8() bool { return true } + +var ( + BinaryTypes = struct { + Binary BinaryDataType + 
String BinaryDataType + LargeBinary BinaryDataType + LargeString BinaryDataType + }{ + Binary: &BinaryType{}, + String: &StringType{}, + LargeBinary: &LargeBinaryType{}, + LargeString: &LargeStringType{}, + } +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_encoded.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_encoded.go new file mode 100644 index 00000000..c1750a88 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_encoded.go @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +type EncodedType interface { + DataType + Encoded() DataType +} + +// RunEndEncodedType is the datatype to represent a run-end encoded +// array of data. ValueNullable defaults to true, but can be set false +// if this should represent a type with a non-nullable value field. +type RunEndEncodedType struct { + runEnds DataType + values DataType + ValueNullable bool +} + +func RunEndEncodedOf(runEnds, values DataType) *RunEndEncodedType { + return &RunEndEncodedType{runEnds: runEnds, values: values, ValueNullable: true} +} + +func (*RunEndEncodedType) ID() Type { return RUN_END_ENCODED } +func (*RunEndEncodedType) Name() string { return "run_end_encoded" } +func (*RunEndEncodedType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecAlwaysNull()}} +} + +func (t *RunEndEncodedType) String() string { + return t.Name() + "<run_ends: " + t.runEnds.String() + ", values: " + t.values.String() + ">" +} + +func (t *RunEndEncodedType) Fingerprint() string { + return typeFingerprint(t) + "{" + t.runEnds.Fingerprint() + ";" + t.values.Fingerprint() + ";}" +} + +func (t *RunEndEncodedType) RunEnds() DataType { return t.runEnds } +func (t *RunEndEncodedType) Encoded() DataType { return t.values } + +func (t *RunEndEncodedType) Fields() []Field { + return []Field{ + {Name: "run_ends", Type: t.runEnds}, + {Name: "values", Type: t.values, Nullable: t.ValueNullable}, + } +} + +func (*RunEndEncodedType) ValidRunEndsType(dt DataType) bool { + switch dt.ID() { + case INT16, INT32, INT64: + return true + } + return false +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_extension.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_extension.go new file mode 100644 index 00000000..271c8b0d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_extension.go @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "reflect" + "sync" +) + +var ( + // global extension type registry, initially left null to avoid paying + // the cost if no extension types are used. + // the choice to use a sync.Map here is because it's expected that most + // use cases would be to register some number of types at initialization + // or otherwise and leave them rather than a pattern of repeatedly registering + // and unregistering types. As per the documentation for sync.Map + // (https://pkg.go.dev/sync#Map), it is specialized for the case where an entry + // is written once but read many times which fits our case here as we register + // a type once and then have to read it many times when deserializing messages + // with that type. + extTypeRegistry *sync.Map + // used for initializing the registry once and only once + initReg sync.Once +) + +// convenience function to ensure that the type registry is initialized once +// and only once in a goroutine-safe manner. +func getExtTypeRegistry() *sync.Map { + initReg.Do(func() { extTypeRegistry = &sync.Map{} }) + return extTypeRegistry +} + +// RegisterExtensionType registers the provided ExtensionType by calling ExtensionName +// to use as a Key for registrying the type. If a type with the same name is already +// registered then this will return an error saying so, otherwise it will return nil +// if successful registering the type. +// This function is safe to call from multiple goroutines simultaneously. +func RegisterExtensionType(typ ExtensionType) error { + name := typ.ExtensionName() + registry := getExtTypeRegistry() + if _, existed := registry.LoadOrStore(name, typ); existed { + return fmt.Errorf("arrow: type extension with name %s already defined", name) + } + return nil +} + +// UnregisterExtensionType removes the type with the given name from the registry +// causing any messages with that type which come in to be expressed with their +// metadata and underlying type instead of the extension type that isn't known. +// This function is safe to call from multiple goroutines simultaneously. +func UnregisterExtensionType(typName string) error { + registry := getExtTypeRegistry() + if _, loaded := registry.LoadAndDelete(typName); !loaded { + return fmt.Errorf("arrow: no type extension with name %s found", typName) + } + return nil +} + +// GetExtensionType retrieves and returns the extension type of the given name +// from the global extension type registry. If the type isn't found it will return +// nil. This function is safe to call from multiple goroutines concurrently. +func GetExtensionType(typName string) ExtensionType { + registry := getExtTypeRegistry() + if val, ok := registry.Load(typName); ok { + return val.(ExtensionType) + } + return nil +} + +// ExtensionType is an interface for handling user-defined types. They must be +// DataTypes and must embed arrow.ExtensionBase in them in order to work properly +// ensuring that they always have the expected base behavior. 
+// +// The arrow.ExtensionBase that needs to be embedded implements the DataType interface +// leaving the remaining functions having to be implemented by the actual user-defined +// type in order to be handled properly. +type ExtensionType interface { + DataType + // ArrayType should return the reflect.TypeOf(ExtensionArrayType{}) where the + // ExtensionArrayType is a type that implements the array.ExtensionArray interface. + // Such a type must also embed the array.ExtensionArrayBase in it. This will be used + // when creating arrays of this ExtensionType by using reflect.New + ArrayType() reflect.Type + // ExtensionName is what will be used when registering / unregistering this extension + // type. Multiple user-defined types can be defined with a parameterized ExtensionType + // as long as the parameter is used in the ExtensionName to distinguish the instances + // in the global Extension Type registry. + // The return from this is also what will be placed in the metadata for IPC communication + // under the key ARROW:extension:name + ExtensionName() string + // StorageType returns the underlying storage type which is used by this extension + // type. It is already implemented by the ExtensionBase struct and thus does not need + // to be re-implemented by a user-defined type. + StorageType() DataType + // ExtensionEquals is used to tell whether two ExtensionType instances are equal types. + ExtensionEquals(ExtensionType) bool + // Serialize should produce any extra metadata necessary for initializing an instance of + // this user-defined type. Not all user-defined types require this and it is valid to return + // nil from this function or an empty slice. This is used for the IPC format and will be + // added to metadata for IPC communication under the key ARROW:extension:metadata + // This should be implemented such that it is valid to be called by multiple goroutines + // concurrently. + Serialize() string + // Deserialize is called when reading in extension arrays and types via the IPC format + // in order to construct an instance of the appropriate extension type. The data passed in + // is pulled from the ARROW:extension:metadata key and may be nil or an empty slice. + // If the storage type is incorrect or something else is invalid with the data this should + // return nil and an appropriate error. + Deserialize(storageType DataType, data string) (ExtensionType, error) + + mustEmbedExtensionBase() +} + +// ExtensionBase is the base struct for user-defined Extension Types which must be +// embedded in any user-defined types like so: +// +// type UserDefinedType struct { +// arrow.ExtensionBase +// // any other data +// } +type ExtensionBase struct { + // Storage is the underlying storage type + Storage DataType +} + +// ID always returns arrow.EXTENSION and should not be overridden +func (*ExtensionBase) ID() Type { return EXTENSION } + +// Name should always return "extension" and should not be overridden +func (*ExtensionBase) Name() string { return "extension" } + +// String by default will return "extension_type" by can be overridden +// to customize what is printed out when printing this extension type. +func (e *ExtensionBase) String() string { return fmt.Sprintf("extension_type", e.Storage) } + +// StorageType returns the underlying storage type and exists so that functions +// written against the ExtensionType interface can access the storage type. 
+func (e *ExtensionBase) StorageType() DataType { return e.Storage } + +func (e *ExtensionBase) Fingerprint() string { return typeFingerprint(e) + e.Storage.Fingerprint() } + +func (e *ExtensionBase) Fields() []Field { + if nested, ok := e.Storage.(NestedType); ok { + return nested.Fields() + } + return nil +} + +func (e *ExtensionBase) Layout() DataTypeLayout { return e.Storage.Layout() } + +// this no-op exists to ensure that this type must be embedded in any user-defined extension type. +// +//lint:ignore U1000 this function is intentionally unused as it only exists to ensure embedding happens +func (ExtensionBase) mustEmbedExtensionBase() {} + +var ( + _ DataType = (*ExtensionBase)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go new file mode 100644 index 00000000..2a2bbc77 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go @@ -0,0 +1,809 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "encoding/json" + "fmt" + "strconv" + "time" + + "golang.org/x/xerrors" +) + +type BooleanType struct{} + +func (t *BooleanType) ID() Type { return BOOL } +func (t *BooleanType) Name() string { return "bool" } +func (t *BooleanType) String() string { return "bool" } +func (t *BooleanType) Fingerprint() string { return typeFingerprint(t) } +func (BooleanType) Bytes() int { return 1 } + +// BitWidth returns the number of bits required to store a single element of this data type in memory. 
+func (t *BooleanType) BitWidth() int { return 1 } + +func (BooleanType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecBitmap()}} +} + +type FixedSizeBinaryType struct { + ByteWidth int +} + +func (*FixedSizeBinaryType) ID() Type { return FIXED_SIZE_BINARY } +func (*FixedSizeBinaryType) Name() string { return "fixed_size_binary" } +func (t *FixedSizeBinaryType) BitWidth() int { return 8 * t.ByteWidth } +func (t *FixedSizeBinaryType) Bytes() int { return t.ByteWidth } +func (t *FixedSizeBinaryType) Fingerprint() string { return typeFingerprint(t) } +func (t *FixedSizeBinaryType) String() string { + return "fixed_size_binary[" + strconv.Itoa(t.ByteWidth) + "]" +} +func (t *FixedSizeBinaryType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(t.ByteWidth)}} +} + +type ( + Timestamp int64 + Time32 int32 + Time64 int64 + TimeUnit int + Date32 int32 + Date64 int64 + Duration int64 +) + +// Date32FromTime returns a Date32 value from a time object +func Date32FromTime(t time.Time) Date32 { + if _, offset := t.Zone(); offset != 0 { + // properly account for timezone adjustments before we calculate + // the number of days by adjusting the time and converting to UTC + t = t.Add(time.Duration(offset) * time.Second).UTC() + } + return Date32(t.Truncate(24*time.Hour).Unix() / int64((time.Hour * 24).Seconds())) +} + +func (d Date32) ToTime() time.Time { + return time.Unix(0, 0).UTC().AddDate(0, 0, int(d)) +} + +func (d Date32) FormattedString() string { + return d.ToTime().Format("2006-01-02") +} + +// Date64FromTime returns a Date64 value from a time object +func Date64FromTime(t time.Time) Date64 { + if _, offset := t.Zone(); offset != 0 { + // properly account for timezone adjustments before we calculate + // the actual value by adjusting the time and converting to UTC + t = t.Add(time.Duration(offset) * time.Second).UTC() + } + // truncate to the start of the day to get the correct value + t = t.Truncate(24 * time.Hour) + return Date64(t.Unix()*1e3 + int64(t.Nanosecond())/1e6) +} + +func (d Date64) ToTime() time.Time { + days := int(int64(d) / (time.Hour * 24).Milliseconds()) + return time.Unix(0, 0).UTC().AddDate(0, 0, days) +} + +func (d Date64) FormattedString() string { + return d.ToTime().Format("2006-01-02") +} + +// TimestampFromStringInLocation is like TimestampFromString, but treats the time instant +// as if it were in the passed timezone before converting to UTC for internal representation. 
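+//
+// Editor's note, an illustrative sketch rather than upstream code: parsing a
+// zone-less wall-clock string as if it were in a named location, using only
+// the API declared in this file:
+//
+//	loc, _ := time.LoadLocation("America/New_York")
+//	ts, hasTZ, err := TimestampFromStringInLocation("2024-01-02 03:04:05", Second, loc)
+//	// ts holds the UTC instant for 03:04:05 Eastern; hasTZ is false because
+//	// the input string carries no explicit offset. err is nil on success.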
+func TimestampFromStringInLocation(val string, unit TimeUnit, loc *time.Location) (Timestamp, bool, error) {
+	if len(val) < 10 {
+		return 0, false, fmt.Errorf("%w: invalid timestamp string", ErrInvalid)
+	}
+
+	var (
+		format         = "2006-01-02"
+		zoneFmt        string
+		lenWithoutZone = len(val)
+	)
+
+	if lenWithoutZone > 10 {
+		switch {
+		case val[len(val)-1] == 'Z':
+			zoneFmt = "Z"
+			lenWithoutZone--
+		case val[len(val)-3] == '+' || val[len(val)-3] == '-':
+			zoneFmt = "-07"
+			lenWithoutZone -= 3
+		case val[len(val)-5] == '+' || val[len(val)-5] == '-':
+			zoneFmt = "-0700"
+			lenWithoutZone -= 5
+		case val[len(val)-6] == '+' || val[len(val)-6] == '-':
+			zoneFmt = "-07:00"
+			lenWithoutZone -= 6
+		}
+	}
+
+	switch {
+	case lenWithoutZone == 13:
+		format += string(val[10]) + "15"
+	case lenWithoutZone == 16:
+		format += string(val[10]) + "15:04"
+	case lenWithoutZone >= 19:
+		format += string(val[10]) + "15:04:05.999999999"
+	}
+
+	// error if we're truncating precision
+	// don't need a case for nano as time.Parse will already error if
+	// more than nanosecond precision is provided
+	switch {
+	case unit == Second && lenWithoutZone > 19:
+		return 0, zoneFmt != "", xerrors.New("provided more than second precision for timestamp[s]")
+	case unit == Millisecond && lenWithoutZone > 23:
+		return 0, zoneFmt != "", xerrors.New("provided more than millisecond precision for timestamp[ms]")
+	case unit == Microsecond && lenWithoutZone > 26:
+		return 0, zoneFmt != "", xerrors.New("provided more than microsecond precision for timestamp[us]")
+	}
+
+	format += zoneFmt
+	out, err := time.Parse(format, val)
+	if err != nil {
+		return 0, zoneFmt != "", fmt.Errorf("%w: %s", ErrInvalid, err)
+	}
+	if loc != time.UTC {
+		// convert to UTC by putting the same time instant in the desired location
+		// before converting to UTC
+		out = out.In(loc).UTC()
+	}
+
+	switch unit {
+	case Second:
+		return Timestamp(out.Unix()), zoneFmt != "", nil
+	case Millisecond:
+		return Timestamp(out.Unix()*1e3 + int64(out.Nanosecond())/1e6), zoneFmt != "", nil
+	case Microsecond:
+		return Timestamp(out.Unix()*1e6 + int64(out.Nanosecond())/1e3), zoneFmt != "", nil
+	case Nanosecond:
+		return Timestamp(out.UnixNano()), zoneFmt != "", nil
+	}
+	return 0, zoneFmt != "", fmt.Errorf("%w: unexpected timestamp unit: %s", ErrInvalid, unit)
+}
+
+// TimestampFromString parses a string and returns a timestamp for the given unit
+// level.
+//
+// The timestamp should be in one of the following forms, [T] can be either T
+// or a space, and [.zzzzzzzzz] can be either left out or up to 9 digits of
+// fractions of a second.
+//
+//	YYYY-MM-DD
+//	YYYY-MM-DD[T]HH
+//	YYYY-MM-DD[T]HH:MM
+//	YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzz]
+//
+// You can also optionally have an ending Z to indicate UTC or indicate a specific
+// timezone using ±HH, ±HHMM or ±HH:MM at the end of the string.
+func TimestampFromString(val string, unit TimeUnit) (Timestamp, error) {
+	tm, _, err := TimestampFromStringInLocation(val, unit, time.UTC)
+	return tm, err
+}
+
+func (t Timestamp) ToTime(unit TimeUnit) time.Time {
+	if unit == Second {
+		return time.Unix(int64(t), 0).UTC()
+	}
+	return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC()
+}
+
+// Time32FromString parses a string to return a Time32 value in the given unit,
+// the unit must be seconds or milliseconds and the string should be in the
+// form of HH:MM or HH:MM:SS[.zzz] where the fractions of a second are optional.
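+//
+// Editor's note, an illustrative sketch rather than upstream code:
+//
+//	v, err := Time32FromString("12:30:05", Second)
+//	// v == 45005, the number of seconds since midnight; err is nil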
+func Time32FromString(val string, unit TimeUnit) (Time32, error) {
+	switch unit {
+	case Second:
+		if len(val) > 8 {
+			return 0, xerrors.New("cannot convert larger than second precision to time32s")
+		}
+	case Millisecond:
+		if len(val) > 12 {
+			return 0, xerrors.New("cannot convert larger than millisecond precision to time32ms")
+		}
+	case Microsecond, Nanosecond:
+		return 0, xerrors.New("time32 can only be seconds or milliseconds")
+	}
+
+	var (
+		out time.Time
+		err error
+	)
+	switch {
+	case len(val) == 5:
+		out, err = time.Parse("15:04", val)
+	default:
+		out, err = time.Parse("15:04:05.999", val)
+	}
+	if err != nil {
+		return 0, err
+	}
+	t := out.Sub(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))
+	if unit == Second {
+		return Time32(t.Seconds()), nil
+	}
+	return Time32(t.Milliseconds()), nil
+}
+
+func (t Time32) ToTime(unit TimeUnit) time.Time {
+	return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC()
+}
+
+func (t Time32) FormattedString(unit TimeUnit) string {
+	const baseFmt = "15:04:05"
+	tm := t.ToTime(unit)
+	switch unit {
+	case Second:
+		return tm.Format(baseFmt)
+	case Millisecond:
+		return tm.Format(baseFmt + ".000")
+	}
+	return ""
+}
+
+// Time64FromString parses a string to return a Time64 value in the given unit,
+// the unit must be microseconds or nanoseconds and the string should be in the
+// form of HH:MM or HH:MM:SS[.zzzzzzzzz] where the fractions of a second are optional.
+func Time64FromString(val string, unit TimeUnit) (Time64, error) {
+	// don't need to check length for nanoseconds as Parse will already error
+	// if more than 9 digits are provided for the fractional second
+	switch unit {
+	case Microsecond:
+		if len(val) > 15 {
+			return 0, xerrors.New("cannot convert larger than microsecond precision to time64us")
+		}
+	case Second, Millisecond:
+		return 0, xerrors.New("time64 should only be microseconds or nanoseconds")
+	}
+
+	var (
+		out time.Time
+		err error
+	)
+	switch {
+	case len(val) == 5:
+		out, err = time.Parse("15:04", val)
+	default:
+		out, err = time.Parse("15:04:05.999999999", val)
+	}
+	if err != nil {
+		return 0, err
+	}
+	t := out.Sub(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))
+	if unit == Microsecond {
+		return Time64(t.Microseconds()), nil
+	}
+	return Time64(t.Nanoseconds()), nil
+}
+
+func (t Time64) ToTime(unit TimeUnit) time.Time {
+	return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC()
+}
+
+func (t Time64) FormattedString(unit TimeUnit) string {
+	const baseFmt = "15:04:05.000000"
+	tm := t.ToTime(unit)
+	switch unit {
+	case Microsecond:
+		return tm.Format(baseFmt)
+	case Nanosecond:
+		return tm.Format(baseFmt + "000")
+	}
+	return ""
+}
+
+const (
+	Second TimeUnit = iota
+	Millisecond
+	Microsecond
+	Nanosecond
+)
+
+var TimeUnitValues = []TimeUnit{Second, Millisecond, Microsecond, Nanosecond}
+
+func (u TimeUnit) Multiplier() time.Duration {
+	return [...]time.Duration{time.Second, time.Millisecond, time.Microsecond, time.Nanosecond}[uint(u)&3]
+}
+
+func (u TimeUnit) String() string { return [...]string{"s", "ms", "us", "ns"}[uint(u)&3] }
+
+type TemporalWithUnit interface {
+	FixedWidthDataType
+	TimeUnit() TimeUnit
+}
+
+// TimestampType is encoded as a 64-bit signed integer since the UNIX epoch
+// (1970-01-01T00:00:00Z). The zero-value has second precision and is time zone
+// neutral; time zone neutral can be considered UTC without having "UTC" as a time zone.
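+//
+// Editor's note, an illustrative sketch rather than upstream code: the raw
+// int64 value is interpreted according to Unit, e.g.
+//
+//	ts := &TimestampType{Unit: Millisecond, TimeZone: "UTC"}
+//	toTime, _ := ts.GetToTimeFunc()
+//	_ = toTime(Timestamp(1_700_000_000_000)) // 2023-11-14 22:13:20 +0000 UTC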
+type TimestampType struct { + Unit TimeUnit + TimeZone string + + loc *time.Location +} + +func (*TimestampType) ID() Type { return TIMESTAMP } +func (*TimestampType) Name() string { return "timestamp" } +func (t *TimestampType) String() string { + switch len(t.TimeZone) { + case 0: + return "timestamp[" + t.Unit.String() + "]" + default: + return "timestamp[" + t.Unit.String() + ", tz=" + t.TimeZone + "]" + } +} + +func (t *TimestampType) Fingerprint() string { + return fmt.Sprintf("%s%d:%s", typeFingerprint(t)+string(timeUnitFingerprint(t.Unit)), len(t.TimeZone), t.TimeZone) +} + +// BitWidth returns the number of bits required to store a single element of this data type in memory. +func (*TimestampType) BitWidth() int { return 64 } + +func (TimestampType) Bytes() int { return Int64SizeBytes } + +func (TimestampType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(TimestampSizeBytes)}} +} + +func (t *TimestampType) TimeUnit() TimeUnit { return t.Unit } + +// ClearCachedLocation clears the cached time.Location object in the type. +// This should be called if you change the value of the TimeZone after having +// potentially called GetZone. +func (t *TimestampType) ClearCachedLocation() { + t.loc = nil +} + +// GetZone returns a *time.Location that represents the current TimeZone member +// of the TimestampType. If it is "", "UTC", or "utc", you'll get time.UTC. +// Otherwise it must either be a valid tzdata string such as "America/New_York" +// or of the format +HH:MM or -HH:MM indicating an absolute offset. +// +// The location object will be cached in the TimestampType for subsequent calls +// so if you change the value of TimeZone after calling this, make sure to call +// ClearCachedLocation. +func (t *TimestampType) GetZone() (*time.Location, error) { + if t.loc != nil { + return t.loc, nil + } + + // the TimeZone string is allowed to be either a valid tzdata string + // such as "America/New_York" or an absolute offset of the form -XX:XX + // or +XX:XX + // + // As such we have two methods we can try, first we'll try LoadLocation + // and if that fails, we'll test for an absolute offset. + if t.TimeZone == "" || t.TimeZone == "UTC" || t.TimeZone == "utc" { + t.loc = time.UTC + return time.UTC, nil + } + + if loc, err := time.LoadLocation(t.TimeZone); err == nil { + t.loc = loc + return t.loc, err + } + + // at this point we know that the timezone isn't empty, and didn't match + // anything in the tzdata names. So either it's an absolute offset + // or it's invalid. + timetz, err := time.Parse("-07:00", t.TimeZone) + if err != nil { + return time.UTC, fmt.Errorf("could not find timezone location for '%s'", t.TimeZone) + } + + _, offset := timetz.Zone() + t.loc = time.FixedZone(t.TimeZone, offset) + return t.loc, nil +} + +// GetToTimeFunc returns a function for converting an arrow.Timestamp value into a +// time.Time object with proper TimeZone and precision. If the TimeZone is invalid +// this will return an error. It calls GetZone to get the timezone for consistency. 
+func (t *TimestampType) GetToTimeFunc() (func(Timestamp) time.Time, error) { + tz, err := t.GetZone() + if err != nil { + return nil, err + } + + switch t.Unit { + case Second: + return func(v Timestamp) time.Time { return time.Unix(int64(v), 0).In(tz) }, nil + case Millisecond: + factor := int64(time.Second / time.Millisecond) + return func(v Timestamp) time.Time { + return time.Unix(int64(v)/factor, (int64(v)%factor)*int64(time.Millisecond)).In(tz) + }, nil + case Microsecond: + factor := int64(time.Second / time.Microsecond) + return func(v Timestamp) time.Time { + return time.Unix(int64(v)/factor, (int64(v)%factor)*int64(time.Microsecond)).In(tz) + }, nil + case Nanosecond: + return func(v Timestamp) time.Time { return time.Unix(0, int64(v)).In(tz) }, nil + } + return nil, fmt.Errorf("invalid timestamp unit: %s", t.Unit) +} + +// Time32Type is encoded as a 32-bit signed integer, representing either seconds or milliseconds since midnight. +type Time32Type struct { + Unit TimeUnit +} + +func (*Time32Type) ID() Type { return TIME32 } +func (*Time32Type) Name() string { return "time32" } +func (*Time32Type) BitWidth() int { return 32 } +func (*Time32Type) Bytes() int { return Int32SizeBytes } +func (t *Time32Type) String() string { return "time32[" + t.Unit.String() + "]" } +func (t *Time32Type) Fingerprint() string { + return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit)) +} + +func (Time32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Time32SizeBytes)}} +} + +func (t *Time32Type) TimeUnit() TimeUnit { return t.Unit } + +// Time64Type is encoded as a 64-bit signed integer, representing either microseconds or nanoseconds since midnight. +type Time64Type struct { + Unit TimeUnit +} + +func (*Time64Type) ID() Type { return TIME64 } +func (*Time64Type) Name() string { return "time64" } +func (*Time64Type) BitWidth() int { return 64 } +func (*Time64Type) Bytes() int { return Int64SizeBytes } +func (t *Time64Type) String() string { return "time64[" + t.Unit.String() + "]" } +func (t *Time64Type) Fingerprint() string { + return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit)) +} + +func (Time64Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Time64SizeBytes)}} +} + +func (t *Time64Type) TimeUnit() TimeUnit { return t.Unit } + +// DurationType is encoded as a 64-bit signed integer, representing an amount +// of elapsed time without any relation to a calendar artifact. +type DurationType struct { + Unit TimeUnit +} + +func (*DurationType) ID() Type { return DURATION } +func (*DurationType) Name() string { return "duration" } +func (*DurationType) BitWidth() int { return 64 } +func (*DurationType) Bytes() int { return Int64SizeBytes } +func (t *DurationType) String() string { return "duration[" + t.Unit.String() + "]" } +func (t *DurationType) Fingerprint() string { + return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit)) +} + +func (DurationType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(DurationSizeBytes)}} +} + +func (t *DurationType) TimeUnit() TimeUnit { return t.Unit } + +// Float16Type represents a floating point value encoded with a 16-bit precision. 
+type Float16Type struct{} + +func (t *Float16Type) ID() Type { return FLOAT16 } +func (t *Float16Type) Name() string { return "float16" } +func (t *Float16Type) String() string { return "float16" } +func (t *Float16Type) Fingerprint() string { return typeFingerprint(t) } + +// BitWidth returns the number of bits required to store a single element of this data type in memory. +func (t *Float16Type) BitWidth() int { return 16 } + +func (Float16Type) Bytes() int { return Float16SizeBytes } + +func (Float16Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Float16SizeBytes)}} +} + +type DecimalType interface { + DataType + GetPrecision() int32 + GetScale() int32 +} + +func NewDecimalType(id Type, prec, scale int32) (DecimalType, error) { + switch id { + case DECIMAL128: + return &Decimal128Type{Precision: prec, Scale: scale}, nil + case DECIMAL256: + return &Decimal256Type{Precision: prec, Scale: scale}, nil + default: + return nil, fmt.Errorf("%w: must use DECIMAL128 or DECIMAL256 to create a DecimalType", ErrInvalid) + } +} + +// Decimal128Type represents a fixed-size 128-bit decimal type. +type Decimal128Type struct { + Precision int32 + Scale int32 +} + +func (*Decimal128Type) ID() Type { return DECIMAL128 } +func (*Decimal128Type) Name() string { return "decimal" } +func (*Decimal128Type) BitWidth() int { return 128 } +func (*Decimal128Type) Bytes() int { return Decimal128SizeBytes } +func (t *Decimal128Type) String() string { + return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale) +} +func (t *Decimal128Type) Fingerprint() string { + return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale) +} +func (t *Decimal128Type) GetPrecision() int32 { return t.Precision } +func (t *Decimal128Type) GetScale() int32 { return t.Scale } + +func (Decimal128Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal128SizeBytes)}} +} + +// Decimal256Type represents a fixed-size 256-bit decimal type. +type Decimal256Type struct { + Precision int32 + Scale int32 +} + +func (*Decimal256Type) ID() Type { return DECIMAL256 } +func (*Decimal256Type) Name() string { return "decimal256" } +func (*Decimal256Type) BitWidth() int { return 256 } +func (*Decimal256Type) Bytes() int { return Decimal256SizeBytes } +func (t *Decimal256Type) String() string { + return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale) +} +func (t *Decimal256Type) Fingerprint() string { + return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale) +} +func (t *Decimal256Type) GetPrecision() int32 { return t.Precision } +func (t *Decimal256Type) GetScale() int32 { return t.Scale } + +func (Decimal256Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal256SizeBytes)}} +} + +// MonthInterval represents a number of months. +type MonthInterval int32 + +func (m *MonthInterval) UnmarshalJSON(data []byte) error { + var val struct { + Months int32 `json:"months"` + } + if err := json.Unmarshal(data, &val); err != nil { + return err + } + + *m = MonthInterval(val.Months) + return nil +} + +func (m MonthInterval) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Months int32 `json:"months"` + }{int32(m)}) +} + +// MonthIntervalType is encoded as a 32-bit signed integer, +// representing a number of months. 
+type MonthIntervalType struct{} + +func (*MonthIntervalType) ID() Type { return INTERVAL_MONTHS } +func (*MonthIntervalType) Name() string { return "month_interval" } +func (*MonthIntervalType) String() string { return "month_interval" } +func (*MonthIntervalType) Fingerprint() string { return typeIDFingerprint(INTERVAL_MONTHS) + "M" } + +// BitWidth returns the number of bits required to store a single element of this data type in memory. +func (t *MonthIntervalType) BitWidth() int { return 32 } + +func (MonthIntervalType) Bytes() int { return Int32SizeBytes } +func (MonthIntervalType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(MonthIntervalSizeBytes)}} +} + +// DayTimeInterval represents a number of days and milliseconds (fraction of day). +type DayTimeInterval struct { + Days int32 `json:"days"` + Milliseconds int32 `json:"milliseconds"` +} + +// DayTimeIntervalType is encoded as a pair of 32-bit signed integer, +// representing a number of days and milliseconds (fraction of day). +type DayTimeIntervalType struct{} + +func (*DayTimeIntervalType) ID() Type { return INTERVAL_DAY_TIME } +func (*DayTimeIntervalType) Name() string { return "day_time_interval" } +func (*DayTimeIntervalType) String() string { return "day_time_interval" } +func (*DayTimeIntervalType) Fingerprint() string { return typeIDFingerprint(INTERVAL_DAY_TIME) + "d" } + +// BitWidth returns the number of bits required to store a single element of this data type in memory. +func (t *DayTimeIntervalType) BitWidth() int { return 64 } + +func (DayTimeIntervalType) Bytes() int { return DayTimeIntervalSizeBytes } +func (DayTimeIntervalType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(DayTimeIntervalSizeBytes)}} +} + +// MonthDayNanoInterval represents a number of months, days and nanoseconds (fraction of day). +type MonthDayNanoInterval struct { + Months int32 `json:"months"` + Days int32 `json:"days"` + Nanoseconds int64 `json:"nanoseconds"` +} + +// MonthDayNanoIntervalType is encoded as two signed 32-bit integers representing +// a number of months and a number of days, followed by a 64-bit integer representing +// the number of nanoseconds since midnight for fractions of a day. +type MonthDayNanoIntervalType struct{} + +func (*MonthDayNanoIntervalType) ID() Type { return INTERVAL_MONTH_DAY_NANO } +func (*MonthDayNanoIntervalType) Name() string { return "month_day_nano_interval" } +func (*MonthDayNanoIntervalType) String() string { return "month_day_nano_interval" } +func (*MonthDayNanoIntervalType) Fingerprint() string { + return typeIDFingerprint(INTERVAL_MONTH_DAY_NANO) + "N" +} + +// BitWidth returns the number of bits required to store a single element of this data type in memory. 
+func (*MonthDayNanoIntervalType) BitWidth() int { return 128 } +func (*MonthDayNanoIntervalType) Bytes() int { return MonthDayNanoIntervalSizeBytes } +func (MonthDayNanoIntervalType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(MonthDayNanoIntervalSizeBytes)}} +} + +type TimestampConvertOp int8 + +const ( + ConvDIVIDE = iota + ConvMULTIPLY +) + +var timestampConversion = [...][4]struct { + op TimestampConvertOp + factor int64 +}{ + Nanosecond: { + Nanosecond: {ConvMULTIPLY, int64(time.Nanosecond)}, + Microsecond: {ConvDIVIDE, int64(time.Microsecond)}, + Millisecond: {ConvDIVIDE, int64(time.Millisecond)}, + Second: {ConvDIVIDE, int64(time.Second)}, + }, + Microsecond: { + Nanosecond: {ConvMULTIPLY, int64(time.Microsecond)}, + Microsecond: {ConvMULTIPLY, 1}, + Millisecond: {ConvDIVIDE, int64(time.Millisecond / time.Microsecond)}, + Second: {ConvDIVIDE, int64(time.Second / time.Microsecond)}, + }, + Millisecond: { + Nanosecond: {ConvMULTIPLY, int64(time.Millisecond)}, + Microsecond: {ConvMULTIPLY, int64(time.Millisecond / time.Microsecond)}, + Millisecond: {ConvMULTIPLY, 1}, + Second: {ConvDIVIDE, int64(time.Second / time.Millisecond)}, + }, + Second: { + Nanosecond: {ConvMULTIPLY, int64(time.Second)}, + Microsecond: {ConvMULTIPLY, int64(time.Second / time.Microsecond)}, + Millisecond: {ConvMULTIPLY, int64(time.Second / time.Millisecond)}, + Second: {ConvMULTIPLY, 1}, + }, +} + +func GetTimestampConvert(in, out TimeUnit) (op TimestampConvertOp, factor int64) { + conv := timestampConversion[int(in)][int(out)] + return conv.op, conv.factor +} + +func ConvertTimestampValue(in, out TimeUnit, value int64) int64 { + conv := timestampConversion[int(in)][int(out)] + switch conv.op { + case ConvMULTIPLY: + return value * conv.factor + case ConvDIVIDE: + return value / conv.factor + } + + return 0 +} + +// DictionaryType represents categorical or dictionary-encoded in-memory data +// It contains a dictionary-encoded value type (any type) and an index type +// (any integer type). 
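+//
+// Editor's note, an illustrative sketch rather than upstream code: a
+// dictionary of string values indexed by uint16 codes,
+//
+//	dt := &DictionaryType{IndexType: &Uint16Type{}, ValueType: &StringType{}}
+//	_ = dt.BitWidth() // 16: the width of the index type, not of the values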
+type DictionaryType struct {
+	IndexType DataType
+	ValueType DataType
+	Ordered   bool
+}
+
+func (*DictionaryType) ID() Type        { return DICTIONARY }
+func (*DictionaryType) Name() string    { return "dictionary" }
+func (d *DictionaryType) BitWidth() int { return d.IndexType.(FixedWidthDataType).BitWidth() }
+func (d *DictionaryType) Bytes() int    { return d.IndexType.(FixedWidthDataType).Bytes() }
+func (d *DictionaryType) String() string {
+	return fmt.Sprintf("%s<values=%v, indices=%v, ordered=%v>",
+		d.Name(), d.ValueType, d.IndexType, d.Ordered)
+}
+func (d *DictionaryType) Fingerprint() string {
+	indexFingerprint := d.IndexType.Fingerprint()
+	valueFingerprint := d.ValueType.Fingerprint()
+	ordered := "1"
+	if !d.Ordered {
+		ordered = "0"
+	}
+
+	if len(valueFingerprint) > 0 {
+		return typeFingerprint(d) + indexFingerprint + valueFingerprint + ordered
+	}
+	return ordered
+}
+
+func (d *DictionaryType) Layout() DataTypeLayout {
+	layout := d.IndexType.Layout()
+	layout.HasDict = true
+	return layout
+}
+
+var (
+	FixedWidthTypes = struct {
+		Boolean              FixedWidthDataType
+		Date32               FixedWidthDataType
+		Date64               FixedWidthDataType
+		DayTimeInterval      FixedWidthDataType
+		Duration_s           FixedWidthDataType
+		Duration_ms          FixedWidthDataType
+		Duration_us          FixedWidthDataType
+		Duration_ns          FixedWidthDataType
+		Float16              FixedWidthDataType
+		MonthInterval        FixedWidthDataType
+		Time32s              FixedWidthDataType
+		Time32ms             FixedWidthDataType
+		Time64us             FixedWidthDataType
+		Time64ns             FixedWidthDataType
+		Timestamp_s          FixedWidthDataType
+		Timestamp_ms         FixedWidthDataType
+		Timestamp_us         FixedWidthDataType
+		Timestamp_ns         FixedWidthDataType
+		MonthDayNanoInterval FixedWidthDataType
+	}{
+		Boolean:              &BooleanType{},
+		Date32:               &Date32Type{},
+		Date64:               &Date64Type{},
+		DayTimeInterval:      &DayTimeIntervalType{},
+		Duration_s:           &DurationType{Unit: Second},
+		Duration_ms:          &DurationType{Unit: Millisecond},
+		Duration_us:          &DurationType{Unit: Microsecond},
+		Duration_ns:          &DurationType{Unit: Nanosecond},
+		Float16:              &Float16Type{},
+		MonthInterval:        &MonthIntervalType{},
+		Time32s:              &Time32Type{Unit: Second},
+		Time32ms:             &Time32Type{Unit: Millisecond},
+		Time64us:             &Time64Type{Unit: Microsecond},
+		Time64ns:             &Time64Type{Unit: Nanosecond},
+		Timestamp_s:          &TimestampType{Unit: Second, TimeZone: "UTC"},
+		Timestamp_ms:         &TimestampType{Unit: Millisecond, TimeZone: "UTC"},
+		Timestamp_us:         &TimestampType{Unit: Microsecond, TimeZone: "UTC"},
+		Timestamp_ns:         &TimestampType{Unit: Nanosecond, TimeZone: "UTC"},
+		MonthDayNanoInterval: &MonthDayNanoIntervalType{},
+	}
+
+	_ FixedWidthDataType = (*FixedSizeBinaryType)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go
new file mode 100644
index 00000000..a6e280cd
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go
@@ -0,0 +1,756 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/apache/arrow/go/v12/arrow/internal/debug"
+)
+
+type NestedType interface {
+	DataType
+	Fields() []Field
+}
+
+// ListType describes a nested type in which each array slot contains
+// a variable-size sequence of values, all having the same relative type.
+type ListType struct {
+	elem Field
+}
+
+func ListOfField(f Field) *ListType {
+	if f.Type == nil {
+		panic("arrow: nil type for list field")
+	}
+	return &ListType{elem: f}
+}
+
+// ListOf returns the list type with element type t.
+// For example, if t represents int32, ListOf(t) represents []int32.
+//
+// ListOf panics if t is nil or invalid. NullableElem defaults to true
+func ListOf(t DataType) *ListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	return &ListType{elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// ListOfNonNullable is like ListOf but NullableElem defaults to false, indicating
+// that the child type should be marked as non-nullable.
+func ListOfNonNullable(t DataType) *ListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	return &ListType{elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*ListType) ID() Type     { return LIST }
+func (*ListType) Name() string { return "list" }
+
+func (t *ListType) String() string {
+	if t.elem.Nullable {
+		return fmt.Sprintf("list<%s: %s, nullable>", t.elem.Name, t.elem.Type)
+	}
+	return fmt.Sprintf("list<%s: %s>", t.elem.Name, t.elem.Type)
+}
+
+func (t *ListType) Fingerprint() string {
+	child := t.elem.Type.Fingerprint()
+	if len(child) > 0 {
+		return typeFingerprint(t) + "{" + child + "}"
+	}
+	return ""
+}
+
+func (t *ListType) SetElemMetadata(md Metadata) { t.elem.Metadata = md }
+
+func (t *ListType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the ListType's element type.
+func (t *ListType) Elem() DataType { return t.elem.Type }
+
+func (t *ListType) ElemField() Field {
+	return t.elem
+}
+
+func (t *ListType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (ListType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}}
+}
+
+func (ListType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+
+type LargeListType struct {
+	ListType
+}
+
+func (LargeListType) ID() Type     { return LARGE_LIST }
+func (LargeListType) Name() string { return "large_list" }
+func (t *LargeListType) String() string {
+	return "large_" + t.ListType.String()
+}
+
+func (t *LargeListType) Fingerprint() string {
+	child := t.elem.Type.Fingerprint()
+	if len(child) > 0 {
+		return typeFingerprint(t) + "{" + child + "}"
+	}
+	return ""
+}
+
+func (LargeListType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes)}}
+}
+
+func (LargeListType) OffsetTypeTraits() OffsetTraits { return Int64Traits }
+
+func LargeListOfField(f Field) *LargeListType {
+	if f.Type == nil {
+		panic("arrow: nil type for list field")
+	}
+	return &LargeListType{ListType{elem: f}}
+}
+
+// LargeListOf returns the large list type with element type t.
+// For example, if t represents int32, LargeListOf(t) represents []int32.
+//
+// LargeListOf panics if t is nil or invalid. NullableElem defaults to true
+func LargeListOf(t DataType) *LargeListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	return &LargeListType{ListType{elem: Field{Name: "item", Type: t, Nullable: true}}}
+}
+
+// LargeListOfNonNullable is like LargeListOf but NullableElem defaults to false,
+// indicating that the child type should be marked as non-nullable.
+func LargeListOfNonNullable(t DataType) *LargeListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	return &LargeListType{ListType{elem: Field{Name: "item", Type: t, Nullable: false}}}
+}
+
+// FixedSizeListType describes a nested type in which each array slot contains
+// a fixed-size sequence of values, all having the same relative type.
+type FixedSizeListType struct {
+	n    int32 // number of elements in the list
+	elem Field
+}
+
+func FixedSizeListOfField(n int32, f Field) *FixedSizeListType {
+	if f.Type == nil {
+		panic("arrow: nil DataType")
+	}
+	if n <= 0 {
+		panic("arrow: invalid size")
+	}
+	return &FixedSizeListType{n: n, elem: f}
+}
+
+// FixedSizeListOf returns the list type with element type t.
+// For example, if t represents int32, FixedSizeListOf(10, t) represents [10]int32.
+//
+// FixedSizeListOf panics if t is nil or invalid.
+// FixedSizeListOf panics if n is <= 0.
+// NullableElem defaults to true
+func FixedSizeListOf(n int32, t DataType) *FixedSizeListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	if n <= 0 {
+		panic("arrow: invalid size")
+	}
+	return &FixedSizeListType{n: n, elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// FixedSizeListOfNonNullable is like FixedSizeListOf but NullableElem defaults to false,
+// indicating that the child type should be marked as non-nullable.
+func FixedSizeListOfNonNullable(n int32, t DataType) *FixedSizeListType {
+	if t == nil {
+		panic("arrow: nil DataType")
+	}
+	if n <= 0 {
+		panic("arrow: invalid size")
+	}
+	return &FixedSizeListType{n: n, elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*FixedSizeListType) ID() Type     { return FIXED_SIZE_LIST }
+func (*FixedSizeListType) Name() string { return "fixed_size_list" }
+func (t *FixedSizeListType) String() string {
+	if t.elem.Nullable {
+		return fmt.Sprintf("fixed_size_list<%s: %s, nullable>[%d]", t.elem.Name, t.elem.Type, t.n)
+	}
+	return fmt.Sprintf("fixed_size_list<%s: %s>[%d]", t.elem.Name, t.elem.Type, t.n)
+}
+
+func (t *FixedSizeListType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the FixedSizeListType's element type.
+func (t *FixedSizeListType) Elem() DataType { return t.elem.Type }
+
+// Len returns the FixedSizeListType's size.
+func (t *FixedSizeListType) Len() int32 { return t.n }
+
+func (t *FixedSizeListType) ElemField() Field {
+	return t.elem
+}
+
+func (t *FixedSizeListType) Fingerprint() string {
+	child := t.elem.Type.Fingerprint()
+	if len(child) > 0 {
+		return fmt.Sprintf("%s[%d]{%s}", typeFingerprint(t), t.n, child)
+	}
+	return ""
+}
+
+func (t *FixedSizeListType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (FixedSizeListType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}}
+}
+
+// StructType describes a nested type parameterized by an ordered sequence
+// of relative types, called its fields.
+type StructType struct {
+	fields []Field
+	index  map[string]int
+	meta   Metadata
+}
+
+// StructOf returns the struct type with fields fs.
+//
+// StructOf panics if there are duplicated fields.
+// StructOf panics if there is a field with an invalid DataType.
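+//
+// Editor's note, an illustrative sketch rather than upstream code:
+//
+//	st := StructOf(
+//		Field{Name: "id", Type: &Int64Type{}},
+//		Field{Name: "name", Type: &StringType{}, Nullable: true},
+//	)
+//	f, ok := st.FieldByName("id") // ok == true, f.Type is the int64 type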
+func StructOf(fs ...Field) *StructType { + n := len(fs) + if n == 0 { + return &StructType{} + } + + t := &StructType{ + fields: make([]Field, n), + index: make(map[string]int, n), + } + for i, f := range fs { + if f.Type == nil { + panic("arrow: field with nil DataType") + } + t.fields[i] = Field{ + Name: f.Name, + Type: f.Type, + Nullable: f.Nullable, + Metadata: f.Metadata.clone(), + } + if _, dup := t.index[f.Name]; dup { + panic(fmt.Errorf("arrow: duplicate field with name %q", f.Name)) + } + t.index[f.Name] = i + } + + return t +} + +func (*StructType) ID() Type { return STRUCT } +func (*StructType) Name() string { return "struct" } + +func (t *StructType) String() string { + var o strings.Builder + o.WriteString("struct<") + for i, f := range t.fields { + if i > 0 { + o.WriteString(", ") + } + o.WriteString(fmt.Sprintf("%s: %v", f.Name, f.Type)) + } + o.WriteString(">") + return o.String() +} + +func (t *StructType) Fields() []Field { return t.fields } +func (t *StructType) Field(i int) Field { return t.fields[i] } + +func (t *StructType) FieldByName(name string) (Field, bool) { + i, ok := t.index[name] + if !ok { + return Field{}, false + } + return t.fields[i], true +} + +func (t *StructType) FieldIdx(name string) (int, bool) { + i, ok := t.index[name] + return i, ok +} + +func (t *StructType) Fingerprint() string { + var b strings.Builder + b.WriteString(typeFingerprint(t)) + b.WriteByte('{') + for _, c := range t.fields { + child := c.Fingerprint() + if len(child) == 0 { + return "" + } + b.WriteString(child) + b.WriteByte(';') + } + b.WriteByte('}') + return b.String() +} + +func (StructType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}} +} + +type MapType struct { + value *ListType + KeysSorted bool +} + +func MapOf(key, item DataType) *MapType { + if key == nil || item == nil { + panic("arrow: nil key or item type for MapType") + } + + return &MapType{value: ListOf(StructOf(Field{Name: "key", Type: key}, Field{Name: "value", Type: item, Nullable: true}))} +} + +func MapOfWithMetadata(key DataType, keyMetadata Metadata, item DataType, itemMetadata Metadata) *MapType { + if key == nil || item == nil { + panic("arrow: nil key or item type for MapType") + } + + return &MapType{value: ListOf(StructOf(Field{ + Name: "key", + Type: key, + Metadata: keyMetadata, + }, Field{ + Name: "value", + Type: item, + Nullable: true, + Metadata: itemMetadata, + }))} +} + +func (*MapType) ID() Type { return MAP } +func (*MapType) Name() string { return "map" } + +func (t *MapType) String() string { + var o strings.Builder + o.WriteString(fmt.Sprintf("map<%s, %s", + t.value.Elem().(*StructType).Field(0).Type, + t.value.Elem().(*StructType).Field(1).Type)) + if t.KeysSorted { + o.WriteString(", keys_sorted") + } + if t.ItemField().Nullable { + o.WriteString(", items_nullable") + } else { + o.WriteString(", items_non_nullable") + } + o.WriteString(">") + return o.String() +} + +func (t *MapType) KeyField() Field { return t.value.Elem().(*StructType).Field(0) } +func (t *MapType) KeyType() DataType { return t.KeyField().Type } +func (t *MapType) ItemField() Field { return t.value.Elem().(*StructType).Field(1) } +func (t *MapType) ItemType() DataType { return t.ItemField().Type } +func (t *MapType) ValueType() *StructType { return t.value.Elem().(*StructType) } +func (t *MapType) ValueField() Field { + return Field{ + Name: "entries", + Type: t.ValueType(), + } +} + +func (t *MapType) SetItemNullable(nullable bool) { + 
t.value.Elem().(*StructType).fields[1].Nullable = nullable +} + +func (t *MapType) Fingerprint() string { + keyFingerprint := t.KeyType().Fingerprint() + itemFingerprint := t.ItemType().Fingerprint() + if keyFingerprint == "" || itemFingerprint == "" { + return "" + } + + fingerprint := typeFingerprint(t) + if t.KeysSorted { + fingerprint += "s" + } + return fingerprint + "{" + keyFingerprint + itemFingerprint + "}" +} + +func (t *MapType) Fields() []Field { return []Field{t.ValueField()} } + +func (t *MapType) Layout() DataTypeLayout { + return t.value.Layout() +} + +func (MapType) OffsetTypeTraits() OffsetTraits { return Int32Traits } + +type ( + // UnionTypeCode is an alias to int8 which is the type of the ids + // used for union arrays. + UnionTypeCode = int8 + UnionMode int8 +) + +const ( + MaxUnionTypeCode UnionTypeCode = 127 + InvalidUnionChildID int = -1 + + SparseMode UnionMode = iota // SPARSE + DenseMode // DENSE +) + +// UnionType is an interface to encompass both Dense and Sparse Union types. +// +// A UnionType is a nested type where each logical value is taken +// from a single child. A buffer of 8-bit type ids (typed as UnionTypeCode) +// indicates which child a given logical value is to be taken from. This is +// represented as the "child id" or "child index", which is the index into the +// list of child fields for a given child. +type UnionType interface { + NestedType + // Mode returns either SparseMode or DenseMode depending on the current + // concrete data type. + Mode() UnionMode + // ChildIDs returns a slice of ints to map UnionTypeCode values to + // the index in the Fields that represents the given Type. It is + // initialized with all values being InvalidUnionChildID (-1) + // before being populated based on the TypeCodes and fields of the type. + // The field for a given type can be retrieved by Fields()[ChildIDs()[typeCode]] + ChildIDs() []int + // TypeCodes returns the list of available type codes for this union type + // which will correspond to indexes into the ChildIDs slice to locate the + // appropriate child. A union Array contains a buffer of these type codes + // which indicate for a given index, which child has the value for that index. + TypeCodes() []UnionTypeCode + // MaxTypeCode returns the value of the largest TypeCode in the list of typecodes + // that are defined by this Union type + MaxTypeCode() UnionTypeCode +} + +// UnionOf returns an appropriate union type for the given Mode (Sparse or Dense), +// child fields, and type codes. len(fields) == len(typeCodes) must be true, or else +// this will panic. len(fields) can be 0. 
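+//
+// Editor's note, an illustrative sketch rather than upstream code:
+//
+//	u := UnionOf(DenseMode, []Field{
+//		{Name: "i", Type: &Int64Type{}},
+//		{Name: "s", Type: &StringType{}},
+//	}, []UnionTypeCode{0, 1})
+//	_ = u.MaxTypeCode() // 1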
+func UnionOf(mode UnionMode, fields []Field, typeCodes []UnionTypeCode) UnionType {
+	switch mode {
+	case SparseMode:
+		return SparseUnionOf(fields, typeCodes)
+	case DenseMode:
+		return DenseUnionOf(fields, typeCodes)
+	default:
+		panic("arrow: invalid union mode")
+	}
+}
+
+type unionType struct {
+	children  []Field
+	typeCodes []UnionTypeCode
+	childIDs  [int(MaxUnionTypeCode) + 1]int
+}
+
+func (t *unionType) init(fields []Field, typeCodes []UnionTypeCode) {
+	// initialize all child IDs to -1
+	t.childIDs[0] = InvalidUnionChildID
+	for i := 1; i < len(t.childIDs); i *= 2 {
+		copy(t.childIDs[i:], t.childIDs[:i])
+	}
+
+	t.children = fields
+	t.typeCodes = typeCodes
+
+	for i, tc := range t.typeCodes {
+		t.childIDs[tc] = i
+	}
+}
+
+func (t unionType) Fields() []Field            { return t.children }
+func (t unionType) TypeCodes() []UnionTypeCode { return t.typeCodes }
+func (t unionType) ChildIDs() []int            { return t.childIDs[:] }
+
+func (t *unionType) validate(fields []Field, typeCodes []UnionTypeCode, _ UnionMode) error {
+	if len(fields) != len(typeCodes) {
+		return errors.New("arrow: union types should have the same number of fields as type codes")
+	}
+
+	for _, c := range typeCodes {
+		if c < 0 || c > MaxUnionTypeCode {
+			return errors.New("arrow: union type code out of bounds")
+		}
+	}
+	return nil
+}
+
+func (t *unionType) MaxTypeCode() (max UnionTypeCode) {
+	if len(t.typeCodes) == 0 {
+		return
+	}
+
+	max = t.typeCodes[0]
+	for _, c := range t.typeCodes[1:] {
+		if c > max {
+			max = c
+		}
+	}
+	return
+}
+
+func (t *unionType) String() string {
+	var b strings.Builder
+	b.WriteByte('<')
+	for i := range t.typeCodes {
+		if i != 0 {
+			b.WriteString(", ")
+		}
+		fmt.Fprintf(&b, "%s=%d", t.children[i], t.typeCodes[i])
+	}
+	b.WriteByte('>')
+	return b.String()
+}
+
+func (t *unionType) fingerprint() string {
+	var b strings.Builder
+	for _, c := range t.typeCodes {
+		fmt.Fprintf(&b, ":%d", c)
+	}
+	b.WriteString("]{")
+	for _, c := range t.children {
+		fingerprint := c.Fingerprint()
+		if len(fingerprint) == 0 {
+			return ""
+		}
+		b.WriteString(fingerprint)
+		b.WriteByte(';')
+	}
+	b.WriteByte('}')
+	return b.String()
+}
+
+func fieldsFromArrays(arrays []Array, names ...string) (ret []Field) {
+	ret = make([]Field, len(arrays))
+	if len(names) == 0 {
+		for i, c := range arrays {
+			ret[i] = Field{Name: strconv.Itoa(i), Type: c.DataType(), Nullable: true}
+		}
+	} else {
+		debug.Assert(len(names) == len(arrays), "mismatch of arrays and names")
+		for i, c := range arrays {
+			ret[i] = Field{Name: names[i], Type: c.DataType(), Nullable: true}
+		}
+	}
+	return
+}
+
+// SparseUnionType is the concrete type for Sparse union data.
+//
+// A sparse union is a nested type where each logical value is taken
+// from a single child. A buffer of 8-bit type ids indicates which child
+// a given logical value is to be taken from.
+//
+// In a sparse union, each child array will have the same length as the
+// union array itself, regardless of the actual number of union values which
+// refer to it.
+//
+// Unlike most other types, unions do not have a top-level validity bitmap.
+type SparseUnionType struct {
+	unionType
+}
+
+// SparseUnionFromArrays enables creating a union type from a list of Arrays,
+// field names, and type codes. len(fields) should be either 0 or equal to len(children).
+// len(codes) should also be either 0, or equal to len(children).
+//
+// If len(fields) == 0, then the fields will be named numerically as "0", "1", "2"...
+// and so on. If len(codes) == 0, then the type codes will be constructed as
+// [0, 1, 2, ..., n].
+func SparseUnionFromArrays(children []Array, fields []string, codes []UnionTypeCode) *SparseUnionType {
+	if len(codes) == 0 {
+		codes = make([]UnionTypeCode, len(children))
+		for i := range children {
+			codes[i] = UnionTypeCode(i)
+		}
+	}
+	return SparseUnionOf(fieldsFromArrays(children, fields...), codes)
+}
+
+// SparseUnionOf is equivalent to UnionOf(arrow.SparseMode, fields, typeCodes),
+// constructing a SparseUnionType from a list of fields and type codes.
+//
+// If len(fields) != len(typeCodes) this will panic. They are allowed to be
+// of length 0.
+func SparseUnionOf(fields []Field, typeCodes []UnionTypeCode) *SparseUnionType {
+	ret := &SparseUnionType{}
+	if err := ret.validate(fields, typeCodes, ret.Mode()); err != nil {
+		panic(err)
+	}
+	ret.init(fields, typeCodes)
+	return ret
+}
+
+func (SparseUnionType) ID() Type        { return SPARSE_UNION }
+func (SparseUnionType) Name() string    { return "sparse_union" }
+func (SparseUnionType) Mode() UnionMode { return SparseMode }
+func (t *SparseUnionType) Fingerprint() string {
+	return typeFingerprint(t) + "[s" + t.fingerprint()
+}
+func (SparseUnionType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecFixedWidth(Uint8SizeBytes)}}
+}
+func (t *SparseUnionType) String() string {
+	return t.Name() + t.unionType.String()
+}
+
+// DenseUnionType is the concrete type for dense union data.
+//
+// A dense union is a nested type where each logical value is taken from a
+// single child, at a specific offset. A buffer of 8-bit type ids (typed
+// as UnionTypeCode) indicates which child a given logical value is to be
+// taken from and a buffer of 32-bit offsets indicating which physical position
+// in the given child array has the logical value for that index.
+//
+// Unlike a sparse union, a dense union allows encoding only the child values
+// which are actually referred to by the union array. This is counterbalanced
+// by the additional footprint of the offsets buffer, and the additional
+// indirection cost when looking up values.
+//
+// Unlike most other types, unions do not have a top-level validity bitmap.
+type DenseUnionType struct {
+	unionType
+}
+
+// DenseUnionFromArrays enables creating a union type from a list of Arrays,
+// field names, and type codes. len(fields) should be either 0 or equal to len(children).
+// len(codes) should also be either 0, or equal to len(children).
+//
+// If len(fields) == 0, then the fields will be named numerically as "0", "1", "2"...
+// and so on. If len(codes) == 0, then the type codes will be constructed as
+// [0, 1, 2, ..., n].
+func DenseUnionFromArrays(children []Array, fields []string, codes []UnionTypeCode) *DenseUnionType {
+	if len(codes) == 0 {
+		codes = make([]UnionTypeCode, len(children))
+		for i := range children {
+			codes[i] = UnionTypeCode(i)
+		}
+	}
+	return DenseUnionOf(fieldsFromArrays(children, fields...), codes)
+}
+
+// DenseUnionOf is equivalent to UnionOf(arrow.DenseMode, fields, typeCodes),
+// constructing a DenseUnionType from a list of fields and type codes.
+//
+// If len(fields) != len(typeCodes) this will panic. They are allowed to be
+// of length 0.
+func DenseUnionOf(fields []Field, typeCodes []UnionTypeCode) *DenseUnionType { + ret := &DenseUnionType{} + if err := ret.validate(fields, typeCodes, ret.Mode()); err != nil { + panic(err) + } + ret.init(fields, typeCodes) + return ret +} + +func (DenseUnionType) ID() Type { return DENSE_UNION } +func (DenseUnionType) Name() string { return "dense_union" } +func (DenseUnionType) Mode() UnionMode { return DenseMode } +func (t *DenseUnionType) Fingerprint() string { + return typeFingerprint(t) + "[s" + t.fingerprint() +} + +func (DenseUnionType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecFixedWidth(Uint8SizeBytes), SpecFixedWidth(Int32SizeBytes)}} +} + +func (DenseUnionType) OffsetTypeTraits() OffsetTraits { return Int32Traits } + +func (t *DenseUnionType) String() string { + return t.Name() + t.unionType.String() +} + +type Field struct { + Name string // Field name + Type DataType // The field's data type + Nullable bool // Fields can be nullable + Metadata Metadata // The field's metadata, if any +} + +func (f Field) Fingerprint() string { + typeFingerprint := f.Type.Fingerprint() + if typeFingerprint == "" { + return "" + } + + var b strings.Builder + b.WriteByte('F') + if f.Nullable { + b.WriteByte('n') + } else { + b.WriteByte('N') + } + b.WriteString(f.Name) + b.WriteByte('{') + b.WriteString(typeFingerprint) + b.WriteByte('}') + return b.String() +} + +func (f Field) HasMetadata() bool { return f.Metadata.Len() != 0 } + +func (f Field) Equal(o Field) bool { + switch { + case f.Name != o.Name: + return false + case f.Nullable != o.Nullable: + return false + case !TypeEqual(f.Type, o.Type, CheckMetadata()): + return false + case !f.Metadata.Equal(o.Metadata): + return false + } + return true +} + +func (f Field) String() string { + var o strings.Builder + nullable := "" + if f.Nullable { + nullable = ", nullable" + } + fmt.Fprintf(&o, "%s: type=%v%v", f.Name, f.Type, nullable) + if f.HasMetadata() { + fmt.Fprintf(&o, "\n%*.smetadata: %v", len(f.Name)+2, "", f.Metadata) + } + return o.String() +} + +var ( + _ DataType = (*ListType)(nil) + _ DataType = (*FixedSizeListType)(nil) + _ DataType = (*StructType)(nil) + _ DataType = (*MapType)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_null.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_null.go new file mode 100644 index 00000000..2d2454c6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_null.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +// NullType describes a degenerate array, with zero physical storage. 
+type NullType struct{} + +func (*NullType) ID() Type { return NULL } +func (*NullType) Name() string { return "null" } +func (*NullType) String() string { return "null" } +func (*NullType) Fingerprint() string { return typeIDFingerprint(NULL) } +func (*NullType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecAlwaysNull()}} +} + +var ( + Null *NullType + _ DataType = Null +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go new file mode 100644 index 00000000..62cbd900 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go @@ -0,0 +1,206 @@ +// Code generated by datatype_numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +type Int8Type struct{} + +func (t *Int8Type) ID() Type { return INT8 } +func (t *Int8Type) Name() string { return "int8" } +func (t *Int8Type) String() string { return "int8" } +func (t *Int8Type) BitWidth() int { return 8 } +func (t *Int8Type) Bytes() int { return Int8SizeBytes } +func (t *Int8Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Int8Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Int8SizeBytes)}} +} + +type Int16Type struct{} + +func (t *Int16Type) ID() Type { return INT16 } +func (t *Int16Type) Name() string { return "int16" } +func (t *Int16Type) String() string { return "int16" } +func (t *Int16Type) BitWidth() int { return 16 } +func (t *Int16Type) Bytes() int { return Int16SizeBytes } +func (t *Int16Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Int16Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Int16SizeBytes)}} +} + +type Int32Type struct{} + +func (t *Int32Type) ID() Type { return INT32 } +func (t *Int32Type) Name() string { return "int32" } +func (t *Int32Type) String() string { return "int32" } +func (t *Int32Type) BitWidth() int { return 32 } +func (t *Int32Type) Bytes() int { return Int32SizeBytes } +func (t *Int32Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Int32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}} +} + +type Int64Type struct{} + +func (t *Int64Type) ID() Type { return INT64 } +func (t *Int64Type) Name() string { return "int64" } +func (t *Int64Type) String() string { return "int64" } +func (t *Int64Type) BitWidth() int { return 64 } +func (t *Int64Type) Bytes() int { return Int64SizeBytes } +func (t *Int64Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Int64Type) Layout() DataTypeLayout { + return 
DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Int64SizeBytes)}} +} + +type Uint8Type struct{} + +func (t *Uint8Type) ID() Type { return UINT8 } +func (t *Uint8Type) Name() string { return "uint8" } +func (t *Uint8Type) String() string { return "uint8" } +func (t *Uint8Type) BitWidth() int { return 8 } +func (t *Uint8Type) Bytes() int { return Uint8SizeBytes } +func (t *Uint8Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Uint8Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Uint8SizeBytes)}} +} + +type Uint16Type struct{} + +func (t *Uint16Type) ID() Type { return UINT16 } +func (t *Uint16Type) Name() string { return "uint16" } +func (t *Uint16Type) String() string { return "uint16" } +func (t *Uint16Type) BitWidth() int { return 16 } +func (t *Uint16Type) Bytes() int { return Uint16SizeBytes } +func (t *Uint16Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Uint16Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Uint16SizeBytes)}} +} + +type Uint32Type struct{} + +func (t *Uint32Type) ID() Type { return UINT32 } +func (t *Uint32Type) Name() string { return "uint32" } +func (t *Uint32Type) String() string { return "uint32" } +func (t *Uint32Type) BitWidth() int { return 32 } +func (t *Uint32Type) Bytes() int { return Uint32SizeBytes } +func (t *Uint32Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Uint32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Uint32SizeBytes)}} +} + +type Uint64Type struct{} + +func (t *Uint64Type) ID() Type { return UINT64 } +func (t *Uint64Type) Name() string { return "uint64" } +func (t *Uint64Type) String() string { return "uint64" } +func (t *Uint64Type) BitWidth() int { return 64 } +func (t *Uint64Type) Bytes() int { return Uint64SizeBytes } +func (t *Uint64Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Uint64Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Uint64SizeBytes)}} +} + +type Float32Type struct{} + +func (t *Float32Type) ID() Type { return FLOAT32 } +func (t *Float32Type) Name() string { return "float32" } +func (t *Float32Type) String() string { return "float32" } +func (t *Float32Type) BitWidth() int { return 32 } +func (t *Float32Type) Bytes() int { return Float32SizeBytes } +func (t *Float32Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Float32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Float32SizeBytes)}} +} + +type Float64Type struct{} + +func (t *Float64Type) ID() Type { return FLOAT64 } +func (t *Float64Type) Name() string { return "float64" } +func (t *Float64Type) String() string { return "float64" } +func (t *Float64Type) BitWidth() int { return 64 } +func (t *Float64Type) Bytes() int { return Float64SizeBytes } +func (t *Float64Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Float64Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Float64SizeBytes)}} +} + +type Date32Type struct{} + +func (t *Date32Type) ID() Type { return DATE32 } +func (t *Date32Type) Name() string { return "date32" } +func (t *Date32Type) String() string { return "date32" } +func (t *Date32Type) BitWidth() int { return 32 } +func (t *Date32Type) Bytes() int { return 
Date32SizeBytes } +func (t *Date32Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Date32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Date32SizeBytes)}} +} + +type Date64Type struct{} + +func (t *Date64Type) ID() Type { return DATE64 } +func (t *Date64Type) Name() string { return "date64" } +func (t *Date64Type) String() string { return "date64" } +func (t *Date64Type) BitWidth() int { return 64 } +func (t *Date64Type) Bytes() int { return Date64SizeBytes } +func (t *Date64Type) Fingerprint() string { return typeFingerprint(t) } +func (t *Date64Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth(Date64SizeBytes)}} +} + +var ( + PrimitiveTypes = struct { + Int8 DataType + Int16 DataType + Int32 DataType + Int64 DataType + Uint8 DataType + Uint16 DataType + Uint32 DataType + Uint64 DataType + Float32 DataType + Float64 DataType + Date32 DataType + Date64 DataType + }{ + + Int8: &Int8Type{}, + Int16: &Int16Type{}, + Int32: &Int32Type{}, + Int64: &Int64Type{}, + Uint8: &Uint8Type{}, + Uint16: &Uint16Type{}, + Uint32: &Uint32Type{}, + Uint64: &Uint64Type{}, + Float32: &Float32Type{}, + Float64: &Float64Type{}, + Date32: &Date32Type{}, + Date64: &Date64Type{}, + } +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpl new file mode 100644 index 00000000..611046af --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpl @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package arrow + +{{range .In}} +type {{.Name}}Type struct {} + +func (t *{{.Name}}Type) ID() Type { return {{.Name|upper}} } +func (t *{{.Name}}Type) Name() string { return "{{.Name|lower}}" } +func (t *{{.Name}}Type) String() string { return "{{.Name|lower}}" } +func (t *{{.Name}}Type) BitWidth() int { return {{.Size}} } +func (t *{{.Name}}Type) Bytes() int { return {{.Name}}SizeBytes } +func (t *{{.Name}}Type) Fingerprint() string { return typeFingerprint(t) } +func (t *{{.Name}}Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{ + SpecBitmap(), SpecFixedWidth({{.Name}}SizeBytes)}} +} + +{{end}} + +var ( + PrimitiveTypes = struct { +{{range .In}} + {{.Name}} DataType +{{- end}} + }{ +{{range .In}} + {{.Name}}: &{{.Name}}Type{}, +{{- end}} + } +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpldata new file mode 100644 index 00000000..12e69fe6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpldata @@ -0,0 +1,66 @@ +[ + { + "Name": "Int8", + "Type": "int8", + "Size": 8 + }, + { + "Name": "Int16", + "Type": "int16", + "Size": 16 + }, + { + "Name": "Int32", + "Type": "int32", + "Size": 32 + }, + { + "Name": "Int64", + "Type": "int64", + "Size": 64 + }, + { + "Name": "Uint8", + "Type": "uint8", + "Size": 8 + }, + { + "Name": "Uint16", + "Type": "uint16", + "Size": 16 + }, + { + "Name": "Uint32", + "Type": "uint32", + "Size": 32 + }, + { + "Name": "Uint64", + "Type": "uint64", + "Size": 64 + }, + { + "Name": "Float32", + "Type": "float32", + "Size": 32 + }, + { + "Name": "Float64", + "Type": "float64", + "Size": 64 + }, + { + "Name": "Date32", + "Type": "date32", + "QualifiedType": "arrow.Date32", + "InternalType": "int32", + "Size": 32 + }, + { + "Name": "Date64", + "Type": "date64", + "QualifiedType": "arrow.Date64", + "InternalType": "int64", + "Size": 64 + } +] diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go b/vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go new file mode 100644 index 00000000..7b8c0f94 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go @@ -0,0 +1,549 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package decimal128 + +import ( + "errors" + "fmt" + "math" + "math/big" + "math/bits" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +var ( + MaxDecimal128 = New(542101086242752217, 687399551400673280-1) +) + +func GetMaxValue(prec int32) Num { + return scaleMultipliers[prec].Sub(FromU64(1)) +} + +// Num represents a signed 128-bit integer in two's complement. +// Calculations wrap around and overflow is ignored. 
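+//
+// For example (an illustrative aside, not upstream wording): FromI64(-1),
+// defined below, is stored as lo = 0xFFFFFFFFFFFFFFFF and hi = -1, the
+// 128-bit two's complement pattern of -1.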
+//
+// For a discussion of the algorithms, look at Knuth's volume 2,
+// Semi-numerical Algorithms section 4.3.1.
+//
+// Adapted from the Apache ORC C++ implementation
+type Num struct {
+	lo uint64 // low bits
+	hi int64  // high bits
+}
+
+// New returns a new signed 128-bit integer value.
+func New(hi int64, lo uint64) Num {
+	return Num{lo: lo, hi: hi}
+}
+
+// FromU64 returns a new signed 128-bit integer value from the provided uint64 one.
+func FromU64(v uint64) Num {
+	return New(0, v)
+}
+
+// FromI64 returns a new signed 128-bit integer value from the provided int64 one.
+func FromI64(v int64) Num {
+	switch {
+	case v > 0:
+		return New(0, uint64(v))
+	case v < 0:
+		return New(-1, uint64(v))
+	default:
+		return Num{}
+	}
+}
+
+// FromBigInt will convert a big.Int to a Num, if the value in v has a
+// BitLen > 128, this will panic.
+func FromBigInt(v *big.Int) (n Num) {
+	bitlen := v.BitLen()
+	if bitlen > 127 {
+		panic("arrow/decimal128: cannot represent value larger than 128bits")
+	} else if bitlen == 0 {
+		// if bitlen is 0, then the value is 0 so return the default zeroed
+		// out n
+		return
+	}
+
+	// if the value is negative, then get the high and low bytes from
+	// v, and then negate it. this is because Num uses a two's complement
+	// representation of values and big.Int stores the value as a bool for
+	// the sign and the absolute value of the integer. This means that the
+	// raw bytes are *always* the absolute value.
+	b := v.Bits()
+	n.lo = uint64(b[0])
+	if len(b) > 1 {
+		n.hi = int64(b[1])
+	}
+	if v.Sign() < 0 {
+		return n.Negate()
+	}
+	return
+}
+
+// Negate returns a copy of this Decimal128 value but with the sign negated
+func (n Num) Negate() Num {
+	n.lo = ^n.lo + 1
+	n.hi = ^n.hi
+	if n.lo == 0 {
+		n.hi += 1
+	}
+	return n
+}
+
+func (n Num) Add(rhs Num) Num {
+	n.hi += rhs.hi
+	var carry uint64
+	n.lo, carry = bits.Add64(n.lo, rhs.lo, 0)
+	n.hi += int64(carry)
+	return n
+}
+
+func (n Num) Sub(rhs Num) Num {
+	n.hi -= rhs.hi
+	var borrow uint64
+	n.lo, borrow = bits.Sub64(n.lo, rhs.lo, 0)
+	n.hi -= int64(borrow)
+	return n
+}
+
+func (n Num) Mul(rhs Num) Num {
+	hi, lo := bits.Mul64(n.lo, rhs.lo)
+	hi += (uint64(n.hi) * rhs.lo) + (n.lo * uint64(rhs.hi))
+	return Num{hi: int64(hi), lo: lo}
+}
+
+func (n Num) Div(rhs Num) (res, rem Num) {
+	b := n.BigInt()
+	out, remainder := b.QuoRem(b, rhs.BigInt(), &big.Int{})
+	return FromBigInt(out), FromBigInt(remainder)
+}
+
+func (n Num) Pow(rhs Num) Num {
+	b := n.BigInt()
+	return FromBigInt(b.Exp(b, rhs.BigInt(), nil))
+}
+
+func scalePositiveFloat64(v float64, prec, scale int32) (float64, error) {
+	var pscale float64
+	if scale >= -38 && scale <= 38 {
+		pscale = float64PowersOfTen[scale+38]
+	} else {
+		pscale = math.Pow10(int(scale))
+	}
+
+	v *= pscale
+	v = math.RoundToEven(v)
+	maxabs := float64PowersOfTen[prec+38]
+	if v <= -maxabs || v >= maxabs {
+		return 0, fmt.Errorf("cannot convert %f to decimal128(precision=%d, scale=%d): overflow", v, prec, scale)
+	}
+	return v, nil
+}
+
+func fromPositiveFloat64(v float64, prec, scale int32) (Num, error) {
+	v, err := scalePositiveFloat64(v, prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	hi := math.Floor(math.Ldexp(v, -64))
+	low := v - math.Ldexp(hi, 64)
+	return Num{hi: int64(hi), lo: uint64(low)}, nil
+}
+
+// this has to exist despite sharing some code with fromPositiveFloat64
+// because if we don't do the casts back to float32 in between each
+// step, we end up with a significantly different answer!
+// Aren't floating point values so much fun?
+//
+// example value to use:
+//
+//	v := float32(1.8446746e+15)
+//
+// You'll end up with different values if you do:
+//
+//	FromFloat64(float64(v), 20, 4)
+//
+// vs
+//
+//	FromFloat32(v, 20, 4)
+//
+// because float64(v) == 1844674629206016 rather than 1844674600000000
+func fromPositiveFloat32(v float32, prec, scale int32) (Num, error) {
+	val, err := scalePositiveFloat64(float64(v), prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	hi := float32(math.Floor(math.Ldexp(float64(float32(val)), -64)))
+	low := float32(val) - float32(math.Ldexp(float64(hi), 64))
+	return Num{hi: int64(hi), lo: uint64(low)}, nil
+}
+
+// FromFloat32 returns a new decimal128.Num constructed from the given float32
+// value using the provided precision and scale. Will return an error if the
+// value cannot be accurately represented with the desired precision and scale.
+func FromFloat32(v float32, prec, scale int32) (Num, error) {
+	if v < 0 {
+		dec, err := fromPositiveFloat32(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat32(v, prec, scale)
+}
+
+// FromFloat64 returns a new decimal128.Num constructed from the given float64
+// value using the provided precision and scale. Will return an error if the
+// value cannot be accurately represented with the desired precision and scale.
+func FromFloat64(v float64, prec, scale int32) (Num, error) {
+	if v < 0 {
+		dec, err := fromPositiveFloat64(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat64(v, prec, scale)
+}
+
+func FromString(v string, prec, scale int32) (n Num, err error) {
+	// time for some math!
+	// Our input precision means "number of digits of precision" but the
+	// math/big library refers to precision in floating point terms
+	// where it refers to the "number of bits of precision in the mantissa".
+	// So we need to figure out how many bits we should use for precision,
+	// based on the input precision. Too much precision and we're not rounding
+	// when we should. Too little precision and we round when we shouldn't.
+	//
+	// In general, the number of decimal digits you get from a given number
+	// of bits will be:
+	//
+	//	digits = log[base 10](2^nbits)
+	//
+	// it thus follows that:
+	//
+	//	digits = nbits * log[base 10](2)
+	//	nbits = digits / log[base 10](2)
+	//
+	// So we need to account for our scale since we're going to be multiplying
+	// by 10^scale in order to get the integral value we're actually going to use.
+	// So to get our number of bits we do:
+	//
+	//	(prec + scale + 1) / log[base10](2)
+	//
+	// Finally, we still have a sign bit, so we add 1 to account for the sign bit.
+	// Aren't floating point numbers fun?
+	var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1
+
+	var out *big.Float
+	out, _, err = big.ParseFloat(v, 10, 127, big.ToNearestEven)
+	if err != nil {
+		return
+	}
+
+	var tmp big.Int
+	val, _ := out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits).Int(&tmp)
+	if val.BitLen() > 127 {
+		return Num{}, errors.New("bitlen too large for decimal128")
+	}
+	n = FromBigInt(val)
+	if !n.FitsInPrecision(prec) {
+		err = fmt.Errorf("val %v doesn't fit in precision %d", n, prec)
+	}
+	return
+}
+
+// ToFloat32 returns a float32 value representative of this decimal128.Num,
+// but with the given scale.
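+//
+// A hedged round-trip sketch (values chosen purely for illustration):
+//
+//	n, _ := FromString("123.45", 5, 2) // held internally as the integer 12345
+//	f := n.ToFloat32(2)                // approximately 123.45
+//	_ = f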
+func (n Num) ToFloat32(scale int32) float32 { + return float32(n.ToFloat64(scale)) +} + +func (n Num) tofloat64Positive(scale int32) float64 { + const twoTo64 float64 = 1.8446744073709552e+19 + x := float64(n.hi) * twoTo64 + x += float64(n.lo) + if scale >= -38 && scale <= 38 { + return x * float64PowersOfTen[-scale+38] + } + + return x * math.Pow10(-int(scale)) +} + +// ToFloat64 returns a float64 value representative of this decimal128.Num, +// but with the given scale. +func (n Num) ToFloat64(scale int32) float64 { + if n.hi < 0 { + return -n.Negate().tofloat64Positive(scale) + } + return n.tofloat64Positive(scale) +} + +// LowBits returns the low bits of the two's complement representation of the number. +func (n Num) LowBits() uint64 { return n.lo } + +// HighBits returns the high bits of the two's complement representation of the number. +func (n Num) HighBits() int64 { return n.hi } + +// Sign returns: +// +// -1 if x < 0 +// +// 0 if x == 0 +// +// +1 if x > 0 +func (n Num) Sign() int { + if n == (Num{}) { + return 0 + } + return int(1 | (n.hi >> 63)) +} + +func toBigIntPositive(n Num) *big.Int { + return (&big.Int{}).SetBits([]big.Word{big.Word(n.lo), big.Word(n.hi)}) +} + +// while the code would be simpler to just do lsh/rsh and add +// it turns out from benchmarking that calling SetBits passing +// in the words and negating ends up being >2x faster +func (n Num) BigInt() *big.Int { + if n.Sign() < 0 { + b := toBigIntPositive(n.Negate()) + return b.Neg(b) + } + return toBigIntPositive(n) +} + +func (n Num) Greater(other Num) bool { + return other.Less(n) +} + +func (n Num) GreaterEqual(other Num) bool { + return !n.Less(other) +} + +// Less returns true if the value represented by n is < other +func (n Num) Less(other Num) bool { + return n.hi < other.hi || (n.hi == other.hi && n.lo < other.lo) +} + +// IncreaseScaleBy returns a new decimal128.Num with the value scaled up by +// the desired amount. Must be 0 <= increase <= 38. Any data loss from scaling +// is ignored. If you wish to prevent data loss, use Rescale which will +// return an error if data loss is detected. +func (n Num) IncreaseScaleBy(increase int32) Num { + debug.Assert(increase >= 0, "invalid increase scale for decimal128") + debug.Assert(increase <= 38, "invalid increase scale for decimal128") + + v := scaleMultipliers[increase].BigInt() + return FromBigInt(v.Mul(n.BigInt(), v)) +} + +// ReduceScaleBy returns a new decimal128.Num with the value scaled down by +// the desired amount and, if 'round' is true, the value will be rounded +// accordingly. Assumes 0 <= reduce <= 38. Any data loss from scaling +// is ignored. If you wish to prevent data loss, use Rescale which will +// return an error if data loss is detected. 
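+//
+// A hedged worked example: 12345 held at scale 2 (i.e. 123.45) reduced by 1
+// with rounding yields 1235 at scale 1 (i.e. 123.5):
+//
+//	n := FromU64(12345)
+//	r := n.ReduceScaleBy(1, true) // r == FromU64(1235)
+//	_ = r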
+func (n Num) ReduceScaleBy(reduce int32, round bool) Num { + debug.Assert(reduce >= 0, "invalid reduce scale for decimal128") + debug.Assert(reduce <= 38, "invalid reduce scale for decimal128") + + if reduce == 0 { + return n + } + + divisor := scaleMultipliers[reduce].BigInt() + result, remainder := divisor.QuoRem(n.BigInt(), divisor, (&big.Int{})) + if round { + divisorHalf := scaleMultipliersHalf[reduce] + if remainder.Abs(remainder).Cmp(divisorHalf.BigInt()) != -1 { + result.Add(result, big.NewInt(int64(n.Sign()))) + } + } + return FromBigInt(result) +} + +func (n Num) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Num) (out Num, loss bool) { + var ( + value, result, remainder *big.Int + ) + value = n.BigInt() + if deltaScale < 0 { + debug.Assert(multiplier.lo != 0 || multiplier.hi != 0, "multiplier needs to not be zero") + result, remainder = (&big.Int{}).QuoRem(value, multiplier.BigInt(), (&big.Int{})) + return FromBigInt(result), remainder.Cmp(big.NewInt(0)) != 0 + } + + result = (&big.Int{}).Mul(value, multiplier.BigInt()) + out = FromBigInt(result) + cmp := result.Cmp(value) + if n.Sign() < 0 { + loss = cmp == 1 + } else { + loss = cmp == -1 + } + return +} + +// Rescale returns a new decimal128.Num with the value updated assuming +// the current value is scaled to originalScale with the new value scaled +// to newScale. If rescaling this way would cause data loss, an error is +// returned instead. +func (n Num) Rescale(originalScale, newScale int32) (out Num, err error) { + if originalScale == newScale { + return n, nil + } + + deltaScale := newScale - originalScale + absDeltaScale := int32(math.Abs(float64(deltaScale))) + + multiplier := scaleMultipliers[absDeltaScale] + var wouldHaveLoss bool + out, wouldHaveLoss = n.rescaleWouldCauseDataLoss(deltaScale, multiplier) + if wouldHaveLoss { + err = errors.New("rescale data loss") + } + return +} + +// Abs returns a new decimal128.Num that contains the absolute value of n +func (n Num) Abs() Num { + switch n.Sign() { + case -1: + return n.Negate() + } + return n +} + +// FitsInPrecision returns true or false if the value currently held by +// n would fit within precision (0 < prec <= 38) without losing any data. 
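+//
+// For example (a hedged sketch): 999 needs three decimal digits, so
+//
+//	FromU64(999).FitsInPrecision(3) // true, since 999 < 10^3
+//	FromU64(999).FitsInPrecision(2) // false, since 999 >= 10^2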
+func (n Num) FitsInPrecision(prec int32) bool { + debug.Assert(prec > 0, "precision must be > 0") + debug.Assert(prec <= 38, "precision must be <= 38") + return n.Abs().Less(scaleMultipliers[prec]) +} + +func (n Num) ToString(scale int32) string { + f := (&big.Float{}).SetInt(n.BigInt()) + f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + return f.Text('f', int(scale)) +} + +func GetScaleMultiplier(pow int) Num { return scaleMultipliers[pow] } + +func GetHalfScaleMultiplier(pow int) Num { return scaleMultipliersHalf[pow] } + +var ( + scaleMultipliers = [...]Num{ + FromU64(1), + FromU64(10), + FromU64(100), + FromU64(1000), + FromU64(10000), + FromU64(100000), + FromU64(1000000), + FromU64(10000000), + FromU64(100000000), + FromU64(1000000000), + FromU64(10000000000), + FromU64(100000000000), + FromU64(1000000000000), + FromU64(10000000000000), + FromU64(100000000000000), + FromU64(1000000000000000), + FromU64(10000000000000000), + FromU64(100000000000000000), + FromU64(1000000000000000000), + New(0, 10000000000000000000), + New(5, 7766279631452241920), + New(54, 3875820019684212736), + New(542, 1864712049423024128), + New(5421, 200376420520689664), + New(54210, 2003764205206896640), + New(542101, 1590897978359414784), + New(5421010, 15908979783594147840), + New(54210108, 11515845246265065472), + New(542101086, 4477988020393345024), + New(5421010862, 7886392056514347008), + New(54210108624, 5076944270305263616), + New(542101086242, 13875954555633532928), + New(5421010862427, 9632337040368467968), + New(54210108624275, 4089650035136921600), + New(542101086242752, 4003012203950112768), + New(5421010862427522, 3136633892082024448), + New(54210108624275221, 12919594847110692864), + New(542101086242752217, 68739955140067328), + New(5421010862427522170, 687399551400673280), + } + + scaleMultipliersHalf = [...]Num{ + FromU64(0), + FromU64(5), + FromU64(50), + FromU64(500), + FromU64(5000), + FromU64(50000), + FromU64(500000), + FromU64(5000000), + FromU64(50000000), + FromU64(500000000), + FromU64(5000000000), + FromU64(50000000000), + FromU64(500000000000), + FromU64(5000000000000), + FromU64(50000000000000), + FromU64(500000000000000), + FromU64(5000000000000000), + FromU64(50000000000000000), + FromU64(500000000000000000), + FromU64(5000000000000000000), + New(2, 13106511852580896768), + New(27, 1937910009842106368), + New(271, 932356024711512064), + New(2710, 9323560247115120640), + New(27105, 1001882102603448320), + New(271050, 10018821026034483200), + New(2710505, 7954489891797073920), + New(27105054, 5757922623132532736), + New(271050543, 2238994010196672512), + New(2710505431, 3943196028257173504), + New(27105054312, 2538472135152631808), + New(271050543121, 6937977277816766464), + New(2710505431213, 14039540557039009792), + New(27105054312137, 11268197054423236608), + New(271050543121376, 2001506101975056384), + New(2710505431213761, 1568316946041012224), + New(27105054312137610, 15683169460410122240), + New(271050543121376108, 9257742014424809472), + New(2710505431213761085, 343699775700336640), + } + + float64PowersOfTen = [...]float64{ + 1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, + 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, + 1e-18, 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, + 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, + 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, + 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, + 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 
1e29, 1e30, 1e31, + 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, + } +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go b/vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go new file mode 100644 index 00000000..3a1e57b0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go @@ -0,0 +1,648 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package decimal256 + +import ( + "errors" + "fmt" + "math" + "math/big" + "math/bits" + + "github.com/apache/arrow/go/v12/arrow/decimal128" + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +const ( + MaxPrecision = 76 + MaxScale = 76 +) + +func GetMaxValue(prec int32) Num { + return scaleMultipliers[prec].Sub(FromU64(1)) +} + +type Num struct { + // arr[0] is the lowest bits, arr[3] is the highest bits + arr [4]uint64 +} + +// New returns a new signed 256-bit integer value where x1 contains +// the highest bits with the rest of the values in order down to the +// lowest bits +// +// ie: New(1, 2, 3, 4) returns with the elements in little-endian order +// {4, 3, 2, 1} but each value is still represented as the native endianness +func New(x1, x2, x3, x4 uint64) Num { + return Num{[4]uint64{x4, x3, x2, x1}} +} + +func (n Num) Array() [4]uint64 { return n.arr } + +func (n Num) LowBits() uint64 { return n.arr[0] } + +func FromDecimal128(n decimal128.Num) Num { + var topBits uint64 + if n.Sign() < 0 { + topBits = math.MaxUint64 + } + return New(topBits, topBits, uint64(n.HighBits()), n.LowBits()) +} + +func FromU64(v uint64) Num { + return Num{[4]uint64{v, 0, 0, 0}} +} + +func FromI64(v int64) Num { + switch { + case v > 0: + return New(0, 0, 0, uint64(v)) + case v < 0: + return New(math.MaxUint64, math.MaxUint64, math.MaxUint64, uint64(v)) + default: + return Num{} + } +} + +func (n Num) Negate() Num { + var carry uint64 = 1 + for i := range n.arr { + n.arr[i] = ^n.arr[i] + carry + if n.arr[i] != 0 { + carry = 0 + } + } + return n +} + +func (n Num) Add(rhs Num) Num { + var carry uint64 + for i, v := range n.arr { + n.arr[i], carry = bits.Add64(v, rhs.arr[i], carry) + } + return n +} + +func (n Num) Sub(rhs Num) Num { + return n.Add(rhs.Negate()) +} + +func (n Num) Mul(rhs Num) Num { + b := n.BigInt() + return FromBigInt(b.Mul(b, rhs.BigInt())) +} + +func (n Num) Div(rhs Num) (res, rem Num) { + b := n.BigInt() + out, remainder := b.QuoRem(b, rhs.BigInt(), &big.Int{}) + return FromBigInt(out), FromBigInt(remainder) +} + +func (n Num) Pow(rhs Num) Num { + b := n.BigInt() + return FromBigInt(b.Exp(b, rhs.BigInt(), nil)) +} + +var pt5 = big.NewFloat(0.5) + +func FromString(v string, prec, scale int32) (n Num, err error) { + // time for some math! 
+	// Our input precision means "number of digits of precision" but the
+	// math/big library refers to precision in floating point terms
+	// where it refers to the "number of bits of precision in the mantissa".
+	// So we need to figure out how many bits we should use for precision,
+	// based on the input precision. Too much precision and we're not rounding
+	// when we should. Too little precision and we round when we shouldn't.
+	//
+	// In general, the number of decimal digits you get from a given number
+	// of bits will be:
+	//
+	//	digits = log[base 10](2^nbits)
+	//
+	// it thus follows that:
+	//
+	//	digits = nbits * log[base 10](2)
+	//	nbits = digits / log[base 10](2)
+	//
+	// So we need to account for our scale since we're going to be multiplying
+	// by 10^scale in order to get the integral value we're actually going to use.
+	// So to get our number of bits we do:
+	//
+	//	(prec + scale + 1) / log[base10](2)
+	//
+	// Finally, we still have a sign bit, so we add 1 to account for the sign bit.
+	// Aren't floating point numbers fun?
+	var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1
+
+	var out *big.Float
+	out, _, err = big.ParseFloat(v, 10, 255, big.ToNearestEven)
+	if err != nil {
+		return
+	}
+
+	out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits)
+	// Since we're going to truncate this to get an integer, we need to round
+	// the value instead because of edge cases so that we match how other implementations
+	// (e.g. C++) handle Decimal values. So if we're negative we'll subtract 0.5 and if
+	// we're positive we'll add 0.5.
+	if out.Signbit() {
+		out.Sub(out, pt5)
+	} else {
+		out.Add(out, pt5)
+	}
+
+	var tmp big.Int
+	val, _ := out.Int(&tmp)
+	if val.BitLen() > 255 {
+		return Num{}, errors.New("bitlen too large for decimal256")
+	}
+	n = FromBigInt(val)
+	if !n.FitsInPrecision(prec) {
+		err = fmt.Errorf("value %v doesn't fit in precision %d", n, prec)
+	}
+	return
+}
+
+func FromFloat32(v float32, prec, scale int32) (Num, error) {
+	debug.Assert(prec > 0 && prec <= 76, "invalid precision for converting to decimal256")
+
+	if math.IsInf(float64(v), 0) {
+		return Num{}, fmt.Errorf("cannot convert %f to decimal256", v)
+	}
+
+	if v < 0 {
+		dec, err := fromPositiveFloat32(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat32(v, prec, scale)
+}
+
+func FromFloat64(v float64, prec, scale int32) (Num, error) {
+	debug.Assert(prec > 0 && prec <= 76, "invalid precision for converting to decimal256")
+
+	if math.IsInf(v, 0) {
+		return Num{}, fmt.Errorf("cannot convert %f to decimal256", v)
+	}
+
+	if v < 0 {
+		dec, err := fromPositiveFloat64(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat64(v, prec, scale)
+}
+
+// this has to exist despite sharing some code with fromPositiveFloat64
+// because if we don't do the casts back to float32 in between each
+// step, we end up with a significantly different answer!
+// Aren't floating point values so much fun?
+//
+// example value to use:
+//
+//	v := float32(1.8446746e+15)
+//
+// You'll end up with different values if you do:
+//
+//	FromFloat64(float64(v), 20, 4)
+//
+// vs
+//
+//	FromFloat32(v, 20, 4)
+//
+// because float64(v) == 1844674629206016 rather than 1844674600000000
+func fromPositiveFloat32(v float32, prec, scale int32) (Num, error) {
+	val, err := scalePositiveFloat64(float64(v), prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	v = float32(val)
+	var arr [4]float32
+	arr[3] = float32(math.Floor(math.Ldexp(float64(v), -192)))
+	v -= float32(math.Ldexp(float64(arr[3]), 192))
+	arr[2] = float32(math.Floor(math.Ldexp(float64(v), -128)))
+	v -= float32(math.Ldexp(float64(arr[2]), 128))
+	arr[1] = float32(math.Floor(math.Ldexp(float64(v), -64)))
+	v -= float32(math.Ldexp(float64(arr[1]), 64))
+	arr[0] = v
+
+	debug.Assert(arr[3] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[3] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[2] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[2] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[1] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[1] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[0] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[0] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	return Num{[4]uint64{uint64(arr[0]), uint64(arr[1]), uint64(arr[2]), uint64(arr[3])}}, nil
+}
+
+func scalePositiveFloat64(v float64, prec, scale int32) (float64, error) {
+	var pscale float64
+	if scale >= -76 && scale <= 76 {
+		pscale = float64PowersOfTen[scale+76]
+	} else {
+		pscale = math.Pow10(int(scale))
+	}
+
+	v *= pscale
+	v = math.RoundToEven(v)
+	maxabs := float64PowersOfTen[prec+76]
+	if v <= -maxabs || v >= maxabs {
+		return 0, fmt.Errorf("cannot convert %f to decimal256(precision=%d, scale=%d): overflow",
+			v, prec, scale)
+	}
+	return v, nil
+}
+
+func fromPositiveFloat64(v float64, prec, scale int32) (Num, error) {
+	val, err := scalePositiveFloat64(v, prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	var arr [4]float64
+	arr[3] = math.Floor(math.Ldexp(val, -192))
+	val -= math.Ldexp(arr[3], 192)
+	arr[2] = math.Floor(math.Ldexp(val, -128))
+	val -= math.Ldexp(arr[2], 128)
+	arr[1] = math.Floor(math.Ldexp(val, -64))
+	val -= math.Ldexp(arr[1], 64)
+	arr[0] = val
+
+	debug.Assert(arr[3] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[3] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[2] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[2] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[1] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[1] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	debug.Assert(arr[0] >= 0, "bad conversion float64 to decimal256")
+	debug.Assert(arr[0] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+	return Num{[4]uint64{uint64(arr[0]), uint64(arr[1]), uint64(arr[2]), uint64(arr[3])}}, nil
+}
+
+func (n Num) tofloat64Positive(scale int32) float64 {
+	const (
+		twoTo64  float64 = 1.8446744073709552e+19
+		twoTo128 float64 = 3.402823669209385e+38
+		twoTo192 float64 = 6.277101735386681e+57
+	)
+
+	x := float64(n.arr[3]) * twoTo192
+	x += float64(n.arr[2]) * twoTo128
+	x += 
float64(n.arr[1]) * twoTo64 + x += float64(n.arr[0]) + + if scale >= -76 && scale <= 76 { + return x * float64PowersOfTen[-scale+76] + } + + return x * math.Pow10(-int(scale)) +} + +func (n Num) ToFloat32(scale int32) float32 { return float32(n.ToFloat64(scale)) } + +func (n Num) ToFloat64(scale int32) float64 { + if n.Sign() < 0 { + return -n.Negate().tofloat64Positive(scale) + } + return n.tofloat64Positive(scale) +} + +func (n Num) Sign() int { + if n == (Num{}) { + return 0 + } + return int(1 | (int64(n.arr[3]) >> 63)) +} + +func FromBigInt(v *big.Int) (n Num) { + bitlen := v.BitLen() + if bitlen > 255 { + panic("arrow/decimal256: cannot represent value larger than 256bits") + } else if bitlen == 0 { + return + } + + b := v.Bits() + for i, bits := range b { + n.arr[i] = uint64(bits) + } + if v.Sign() < 0 { + return n.Negate() + } + return +} + +func toBigIntPositive(n Num) *big.Int { + return new(big.Int).SetBits([]big.Word{big.Word(n.arr[0]), big.Word(n.arr[1]), big.Word(n.arr[2]), big.Word(n.arr[3])}) +} + +func (n Num) BigInt() *big.Int { + if n.Sign() < 0 { + b := toBigIntPositive(n.Negate()) + return b.Neg(b) + } + return toBigIntPositive(n) +} + +func (n Num) Greater(other Num) bool { + return other.Less(n) +} + +func (n Num) GreaterEqual(other Num) bool { + return !n.Less(other) +} + +func (n Num) Less(other Num) bool { + switch { + case n.arr[3] != other.arr[3]: + return int64(n.arr[3]) < int64(other.arr[3]) + case n.arr[2] != other.arr[2]: + return n.arr[2] < other.arr[2] + case n.arr[1] != other.arr[1]: + return n.arr[1] < other.arr[1] + } + return n.arr[0] < other.arr[0] +} + +func (n Num) IncreaseScaleBy(increase int32) Num { + debug.Assert(increase >= 0, "invalid amount to increase scale by") + debug.Assert(increase <= 76, "invalid amount to increase scale by") + + v := scaleMultipliers[increase].BigInt() + return FromBigInt(v.Mul(n.BigInt(), v)) +} + +func (n Num) ReduceScaleBy(reduce int32, round bool) Num { + debug.Assert(reduce >= 0, "invalid amount to reduce scale by") + debug.Assert(reduce <= 76, "invalid amount to reduce scale by") + + if reduce == 0 { + return n + } + + divisor := scaleMultipliers[reduce].BigInt() + result, remainder := divisor.QuoRem(n.BigInt(), divisor, new(big.Int)) + if round { + divisorHalf := scaleMultipliersHalf[reduce] + if remainder.Abs(remainder).Cmp(divisorHalf.BigInt()) != -1 { + result.Add(result, big.NewInt(int64(n.Sign()))) + } + } + return FromBigInt(result) +} + +func (n Num) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Num) (out Num, loss bool) { + if deltaScale < 0 { + var remainder Num + out, remainder = n.Div(multiplier) + return out, remainder != Num{} + } + + out = n.Mul(multiplier) + if n.Sign() < 0 { + loss = n.Less(out) + } else { + loss = out.Less(n) + } + return +} + +func (n Num) Rescale(original, newscale int32) (out Num, err error) { + if original == newscale { + return n, nil + } + + deltaScale := newscale - original + absDeltaScale := int32(math.Abs(float64(deltaScale))) + + multiplier := scaleMultipliers[absDeltaScale] + var wouldHaveLoss bool + out, wouldHaveLoss = n.rescaleWouldCauseDataLoss(deltaScale, multiplier) + if wouldHaveLoss { + err = errors.New("rescale data loss") + } + return +} + +func (n Num) Abs() Num { + switch n.Sign() { + case -1: + return n.Negate() + } + return n +} + +func (n Num) FitsInPrecision(prec int32) bool { + debug.Assert(prec > 0, "precision must be > 0") + debug.Assert(prec <= 76, "precision must be <= 76") + return n.Abs().Less(scaleMultipliers[prec]) +} + +func (n Num) 
ToString(scale int32) string { + f := (&big.Float{}).SetInt(n.BigInt()) + f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + return f.Text('f', int(scale)) +} + +func GetScaleMultiplier(pow int) Num { return scaleMultipliers[pow] } + +func GetHalfScaleMultiplier(pow int) Num { return scaleMultipliersHalf[pow] } + +var ( + scaleMultipliers = [...]Num{ + FromU64(1), + FromU64(10), + FromU64(100), + FromU64(1000), + FromU64(10000), + FromU64(100000), + FromU64(1000000), + FromU64(10000000), + FromU64(100000000), + FromU64(1000000000), + FromU64(10000000000), + FromU64(100000000000), + FromU64(1000000000000), + FromU64(10000000000000), + FromU64(100000000000000), + FromU64(1000000000000000), + FromU64(10000000000000000), + FromU64(100000000000000000), + FromU64(1000000000000000000), + New(0, 0, 0, 10000000000000000000), + New(0, 0, 5, 7766279631452241920), + New(0, 0, 54, 3875820019684212736), + New(0, 0, 542, 1864712049423024128), + New(0, 0, 5421, 200376420520689664), + New(0, 0, 54210, 2003764205206896640), + New(0, 0, 542101, 1590897978359414784), + New(0, 0, 5421010, 15908979783594147840), + New(0, 0, 54210108, 11515845246265065472), + New(0, 0, 542101086, 4477988020393345024), + New(0, 0, 5421010862, 7886392056514347008), + New(0, 0, 54210108624, 5076944270305263616), + New(0, 0, 542101086242, 13875954555633532928), + New(0, 0, 5421010862427, 9632337040368467968), + New(0, 0, 54210108624275, 4089650035136921600), + New(0, 0, 542101086242752, 4003012203950112768), + New(0, 0, 5421010862427522, 3136633892082024448), + New(0, 0, 54210108624275221, 12919594847110692864), + New(0, 0, 542101086242752217, 68739955140067328), + New(0, 0, 5421010862427522170, 687399551400673280), + New(0, 2, 17316620476856118468, 6873995514006732800), + New(0, 29, 7145508105175220139, 13399722918938673152), + New(0, 293, 16114848830623546549, 4870020673419870208), + New(0, 2938, 13574535716559052564, 11806718586779598848), + New(0, 29387, 6618148649623664334, 7386721425538678784), + New(0, 293873, 10841254275107988496, 80237960548581376), + New(0, 2938735, 16178822382532126880, 802379605485813760), + New(0, 29387358, 14214271235644855872, 8023796054858137600), + New(0, 293873587, 13015503840481697412, 6450984253743169536), + New(0, 2938735877, 1027829888850112811, 9169610316303040512), + New(0, 29387358770, 10278298888501128114, 17909126868192198656), + New(0, 293873587705, 10549268516463523069, 13070572018536022016), + New(0, 2938735877055, 13258964796087472617, 1578511669393358848), + New(0, 29387358770557, 3462439444907864858, 15785116693933588480), + New(0, 293873587705571, 16177650375369096972, 10277214349659471872), + New(0, 2938735877055718, 14202551164014556797, 10538423128046960640), + New(0, 29387358770557187, 12898303124178706663, 13150510911921848320), + New(0, 293873587705571876, 18302566799529756941, 2377900603251621888), + New(0, 2938735877055718769, 17004971331911604867, 5332261958806667264), + New(1, 10940614696847636083, 4029016655730084128, 16429131440647569408), + New(15, 17172426599928602752, 3396678409881738056, 16717361816799281152), + New(159, 5703569335900062977, 15520040025107828953, 1152921504606846976), + New(1593, 1695461137871974930, 7626447661401876602, 11529215046068469760), + New(15930, 16954611378719749304, 2477500319180559562, 4611686018427387904), + New(159309, 3525417123811528497, 6328259118096044006, 9223372036854775808), + New(1593091, 16807427164405733357, 7942358959831785217, 0), + New(15930919, 2053574980671369030, 5636613303479645706, 0), + 
New(159309191, 2089005733004138687, 1025900813667802212, 0), + New(1593091911, 2443313256331835254, 10259008136678022120, 0), + New(15930919111, 5986388489608800929, 10356360998232463120, 0), + New(159309191113, 4523652674959354447, 11329889613776873120, 0), + New(1593091911132, 8343038602174441244, 2618431695511421504, 0), + New(15930919111324, 9643409726906205977, 7737572881404663424, 0), + New(159309191113245, 4200376900514301694, 3588752519208427776, 0), + New(1593091911132452, 5110280857723913709, 17440781118374726144, 0), + New(15930919111324522, 14209320429820033867, 8387114520361296896, 0), + New(159309191113245227, 12965995782233477362, 10084168908774762496, 0), + New(1593091911132452277, 532749306367912313, 8607968719199866880, 0), + } + + scaleMultipliersHalf = [...]Num{ + FromU64(0), + FromU64(5), + FromU64(50), + FromU64(500), + FromU64(5000), + FromU64(50000), + FromU64(500000), + FromU64(5000000), + FromU64(50000000), + FromU64(500000000), + FromU64(5000000000), + FromU64(50000000000), + FromU64(500000000000), + FromU64(5000000000000), + FromU64(50000000000000), + FromU64(500000000000000), + FromU64(5000000000000000), + FromU64(50000000000000000), + FromU64(500000000000000000), + FromU64(5000000000000000000), + New(0, 0, 2, 13106511852580896768), + New(0, 0, 27, 1937910009842106368), + New(0, 0, 271, 932356024711512064), + New(0, 0, 2710, 9323560247115120640), + New(0, 0, 27105, 1001882102603448320), + New(0, 0, 271050, 10018821026034483200), + New(0, 0, 2710505, 7954489891797073920), + New(0, 0, 27105054, 5757922623132532736), + New(0, 0, 271050543, 2238994010196672512), + New(0, 0, 2710505431, 3943196028257173504), + New(0, 0, 27105054312, 2538472135152631808), + New(0, 0, 271050543121, 6937977277816766464), + New(0, 0, 2710505431213, 14039540557039009792), + New(0, 0, 27105054312137, 11268197054423236608), + New(0, 0, 271050543121376, 2001506101975056384), + New(0, 0, 2710505431213761, 1568316946041012224), + New(0, 0, 27105054312137610, 15683169460410122240), + New(0, 0, 271050543121376108, 9257742014424809472), + New(0, 0, 2710505431213761085, 343699775700336640), + New(0, 1, 8658310238428059234, 3436997757003366400), + New(0, 14, 12796126089442385877, 15923233496324112384), + New(0, 146, 17280796452166549082, 11658382373564710912), + New(0, 1469, 6787267858279526282, 5903359293389799424), + New(0, 14693, 12532446361666607975, 3693360712769339392), + New(0, 146936, 14643999174408770056, 40118980274290688), + New(0, 1469367, 17312783228120839248, 401189802742906880), + New(0, 14693679, 7107135617822427936, 4011898027429068800), + New(0, 146936793, 15731123957095624514, 3225492126871584768), + New(0, 1469367938, 9737286981279832213, 13808177195006296064), + New(0, 14693679385, 5139149444250564057, 8954563434096099328), + New(0, 146936793852, 14498006295086537342, 15758658046122786816), + New(0, 1469367938527, 15852854434898512116, 10012627871551455232), + New(0, 14693679385278, 10954591759308708237, 7892558346966794240), + New(0, 146936793852785, 17312197224539324294, 5138607174829735936), + New(0, 1469367938527859, 7101275582007278398, 14492583600878256128), + New(0, 14693679385278593, 15672523598944129139, 15798627492815699968), + New(0, 146936793852785938, 9151283399764878470, 10412322338480586752), + New(0, 1469367938527859384, 17725857702810578241, 11889503016258109440), + New(0, 14693679385278593849, 11237880364719817872, 8214565720323784704), + New(7, 17809585336819077184, 1698339204940869028, 8358680908399640576), + New(79, 12075156704804807296, 
16983392049408690284, 9799832789158199296), + New(796, 10071102605790763273, 3813223830700938301, 5764607523034234880), + New(7965, 8477305689359874652, 1238750159590279781, 2305843009213693952), + New(79654, 10986080598760540056, 12387501595902797811, 4611686018427387904), + New(796545, 17627085619057642486, 13194551516770668416, 9223372036854775808), + New(7965459, 10250159527190460323, 2818306651739822853, 0), + New(79654595, 10267874903356845151, 9736322443688676914, 0), + New(796545955, 10445028665020693435, 5129504068339011060, 0), + New(7965459555, 12216566281659176272, 14401552535971007368, 0), + New(79654595556, 11485198374334453031, 14888316843743212368, 0), + New(796545955566, 4171519301087220622, 1309215847755710752, 0), + New(7965459555662, 4821704863453102988, 13092158477557107520, 0), + New(79654595556622, 11323560487111926655, 1794376259604213888, 0), + New(796545955566226, 2555140428861956854, 17943762596042138880, 0), + New(7965459555662261, 7104660214910016933, 13416929297035424256, 0), + New(79654595556622613, 15706369927971514489, 5042084454387381248, 0), + New(796545955566226138, 9489746690038731964, 13527356396454709248, 0), + } + + float64PowersOfTen = [...]float64{ + 1e-76, 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66, 1e-65, + 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, 1e-55, 1e-54, 1e-53, + 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, + 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, + 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17, + 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, + 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, + 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30, 1e31, + 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40, 1e41, 1e42, 1e43, + 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50, 1e51, 1e52, 1e53, 1e54, 1e55, + 1e56, 1e57, 1e58, 1e59, 1e60, 1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67, + 1e68, 1e69, 1e70, 1e71, 1e72, 1e73, 1e74, 1e75, 1e76, + } +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/doc.go b/vendor/github.com/apache/arrow/go/v12/arrow/doc.go new file mode 100644 index 00000000..a9770dae --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/doc.go @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package arrow provides an implementation of Apache Arrow. + +Apache Arrow is a cross-language development platform for in-memory data. 
It specifies a standardized +language-independent columnar memory format for flat and hierarchical data, organized for efficient analytic +operations on modern hardware. It also provides computational libraries and zero-copy streaming +messaging and inter-process communication. + +Basics + +The fundamental data structure in Arrow is an Array, which holds a sequence of values of the same type. An array +consists of memory holding the data and an additional validity bitmap that indicates if the corresponding entry in the +array is valid (not null). If the array has no null entries, it is possible to omit this bitmap. + +Requirements + +Despite the go.mod stating go1.18, everything except for the compute package +is able to be built with go1.17 (and most is also compatible with go1.16). +*/ +package arrow + +const PkgVersion = "12.0.1" + +//go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl +//go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl +//go:generate go run _tools/tmpl/main.go -i -data=scalar/numeric.gen.go.tmpldata scalar/numeric.gen.go.tmpl scalar/numeric.gen_test.go.tmpl +//go:generate go run ./gen-flatbuffers.go + +// stringer +//go:generate stringer -type=Type +//go:generate stringer -type=UnionMode -linecomment diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go b/vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go new file mode 100644 index 00000000..1d8a6a75 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go @@ -0,0 +1,219 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoded + +import ( + "math" + "sort" + + "github.com/apache/arrow/go/v12/arrow" +) + +// FindPhysicalIndex performs a binary search on the run-ends to return +// the appropriate physical offset into the values/run-ends that corresponds +// with the logical index provided when called. If the array's logical offset +// is provided, this is equivalent to calling FindPhysicalOffset. +// +// For example, an array with run-ends [10, 20, 30, 40, 50] and a logicalIdx +// of 25 will return the value 2. This returns the smallest offset +// whose run-end is greater than the logicalIdx requested, which would +// also be the index into the values that contains the correct value. 
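+//
+// A hedged sketch of the search this performs (sort.Search over the run-ends):
+//
+//	runEnds := []int32{10, 20, 30, 40, 50}
+//	idx := sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > 25 })
+//	// idx == 2: logical index 25 falls inside the run that ends at 30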
+// +// This function assumes it receives Run End Encoded array data +func FindPhysicalIndex(arr arrow.ArrayData, logicalIdx int) int { + data := arr.Children()[0] + if data.Len() == 0 { + return 0 + } + + switch data.DataType().ID() { + case arrow.INT16: + runEnds := arrow.Int16Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[data.Offset() : data.Offset()+data.Len()] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int16(logicalIdx) }) + case arrow.INT32: + runEnds := arrow.Int32Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[data.Offset() : data.Offset()+data.Len()] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int32(logicalIdx) }) + case arrow.INT64: + runEnds := arrow.Int64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[data.Offset() : data.Offset()+data.Len()] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int64(logicalIdx) }) + default: + panic("only int16, int32, and int64 are allowed for the run-ends") + } +} + +// FindPhysicalOffset performs a binary search on the run-ends to return +// the appropriate physical offset into the values/run-ends that corresponds +// with the logical offset defined in the array. +// +// For example, an array with run-ends [10, 20, 30, 40, 50] and a logical +// offset of 25 will return the value 2. This returns the smallest offset +// whose run-end is greater than the logical offset, which would also be the +// offset index into the values that contains the correct value. +// +// This function assumes it receives Run End Encoded array data +func FindPhysicalOffset(arr arrow.ArrayData) int { + return FindPhysicalIndex(arr, arr.Offset()) +} + +// GetPhysicalLength returns the physical number of values which are in +// the passed in RunEndEncoded array data. This will take into account +// the offset and length of the array as reported in the array data +// (so that it properly handles slices). 
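+//
+// A hedged worked example: with run-ends [10, 20, 30, 40, 50], a slice with
+// offset 5 and length 20 covers logical indices 5 through 24, which touch the
+// first three runs, so the physical length reported here would be 3.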
+// +// This function assumes it receives Run End Encoded array data +func GetPhysicalLength(arr arrow.ArrayData) int { + if arr.Len() == 0 { + return 0 + } + + data := arr.Children()[0] + physicalOffset := FindPhysicalOffset(arr) + start, length := data.Offset()+physicalOffset, data.Len()-physicalOffset + offset := arr.Offset() + arr.Len() - 1 + + switch data.DataType().ID() { + case arrow.INT16: + runEnds := arrow.Int16Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[start : start+length] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int16(offset) }) + 1 + case arrow.INT32: + runEnds := arrow.Int32Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[start : start+length] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int32(offset) }) + 1 + case arrow.INT64: + runEnds := arrow.Int64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + runEnds = runEnds[start : start+length] + return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int64(offset) }) + 1 + default: + panic("arrow/rle: can only get rle.PhysicalLength for int16/int32/int64 run ends array") + } +} + +func getRunEnds(arr arrow.ArrayData) func(int64) int64 { + switch arr.DataType().ID() { + case arrow.INT16: + runEnds := arrow.Int16Traits.CastFromBytes(arr.Buffers()[1].Bytes()) + runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()] + return func(i int64) int64 { return int64(runEnds[i]) } + case arrow.INT32: + runEnds := arrow.Int32Traits.CastFromBytes(arr.Buffers()[1].Bytes()) + runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()] + return func(i int64) int64 { return int64(runEnds[i]) } + case arrow.INT64: + runEnds := arrow.Int64Traits.CastFromBytes(arr.Buffers()[1].Bytes()) + runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()] + return func(i int64) int64 { return int64(runEnds[i]) } + default: + panic("only int16, int32, and int64 are allowed for the run-ends") + } +} + +// MergedRuns is used to take two Run End Encoded arrays and iterate +// them, finding the correct physical indices to correspond with the +// runs. +type MergedRuns struct { + inputs [2]arrow.Array + runIndex [2]int64 + inputRunEnds [2]func(int64) int64 + runEnds [2]int64 + logicalLen int + logicalPos int + mergedEnd int64 +} + +// NewMergedRuns takes two RunEndEncoded arrays and returns a MergedRuns +// object that will allow iterating over the physical indices of the runs. +func NewMergedRuns(inputs [2]arrow.Array) *MergedRuns { + if len(inputs) == 0 { + return &MergedRuns{logicalLen: 0} + } + + mr := &MergedRuns{inputs: inputs, logicalLen: inputs[0].Len()} + for i, in := range inputs { + if in.DataType().ID() != arrow.RUN_END_ENCODED { + panic("arrow/rle: NewMergedRuns can only be called with RunLengthEncoded arrays") + } + if in.Len() != mr.logicalLen { + panic("arrow/rle: can only merge runs of RLE arrays of the same length") + } + + mr.inputRunEnds[i] = getRunEnds(in.Data().Children()[0]) + // initialize the runIndex at the physical offset - 1 so the first + // call to Next will increment it to the correct initial offset + // since the initial state is logicalPos == 0 and mergedEnd == 0 + mr.runIndex[i] = int64(FindPhysicalOffset(in.Data())) - 1 + } + + return mr +} + +// Next returns true if there are more values/runs to iterate and false +// when one of the arrays has reached the end. 
+func (mr *MergedRuns) Next() bool {
+    mr.logicalPos = int(mr.mergedEnd)
+    if mr.isEnd() {
+        return false
+    }
+
+    for i := range mr.inputs {
+        if mr.logicalPos == int(mr.runEnds[i]) {
+            mr.runIndex[i]++
+        }
+    }
+    mr.findMergedRun()
+
+    return true
+}
+
+// IndexIntoBuffer returns the physical index into the value buffer of
+// the passed-in array index (i.e. 0 for the first array and 1 for the second).
+// This takes into account the offset of the array, so it is the true physical
+// index into the value *buffer* in the child.
+func (mr *MergedRuns) IndexIntoBuffer(id int) int64 {
+    return mr.runIndex[id] + int64(mr.inputs[id].Data().Children()[1].Offset())
+}
+
+// IndexIntoArray is like IndexIntoBuffer but it doesn't take into account
+// the array offset and instead is the index that can be used with the .Value
+// method on the array to get the correct value.
+func (mr *MergedRuns) IndexIntoArray(id int) int64 { return mr.runIndex[id] }
+
+// RunLength returns the logical length of the current merged run being looked at.
+func (mr *MergedRuns) RunLength() int64 { return mr.mergedEnd - int64(mr.logicalPos) }
+
+// AccumulatedRunLength returns the logical run end of the current merged run.
+func (mr *MergedRuns) AccumulatedRunLength() int64 { return mr.mergedEnd }
+
+func (mr *MergedRuns) findMergedRun() {
+    mr.mergedEnd = int64(math.MaxInt64)
+    for i, in := range mr.inputs {
+        // logical indices of the end of the run we are currently in each input
+        mr.runEnds[i] = int64(mr.inputRunEnds[i](mr.runIndex[i]) - int64(in.Data().Offset()))
+        // the logical length may end in the middle of a run, in case the array was sliced
+        if mr.logicalLen < int(mr.runEnds[i]) {
+            mr.runEnds[i] = int64(mr.logicalLen)
+        }
+        if mr.runEnds[i] < mr.mergedEnd {
+            mr.mergedEnd = mr.runEnds[i]
+        }
+    }
+}
+
+func (mr *MergedRuns) isEnd() bool { return mr.logicalPos == mr.logicalLen }
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/big.go b/vendor/github.com/apache/arrow/go/v12/arrow/endian/big.go
new file mode 100644
index 00000000..0b925857
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/endian/big.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +//go:build s390x +// +build s390x + +package endian + +import "encoding/binary" + +var Native = binary.BigEndian + +const ( + IsBigEndian = true + NativeEndian = BigEndian + NonNativeEndian = LittleEndian +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go b/vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go new file mode 100644 index 00000000..4ae6ded7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package endian + +import ( + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" +) + +type Endianness flatbuf.Endianness + +const ( + LittleEndian Endianness = Endianness(flatbuf.EndiannessLittle) + BigEndian Endianness = Endianness(flatbuf.EndiannessBig) +) + +func (e Endianness) String() string { + switch e { + case LittleEndian: + return "little" + case BigEndian: + return "big" + default: + debug.Assert(false, "wtf? bad endianness value") + return "???" + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/little.go b/vendor/github.com/apache/arrow/go/v12/arrow/endian/little.go new file mode 100644 index 00000000..def1fc64 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/endian/little.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !s390x +// +build !s390x + +package endian + +import "encoding/binary" + +var Native = binary.LittleEndian + +const ( + IsBigEndian = false + NativeEndian = LittleEndian + NonNativeEndian = BigEndian +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/errors.go b/vendor/github.com/apache/arrow/go/v12/arrow/errors.go new file mode 100644 index 00000000..b4a11b95 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/errors.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import "errors"
+
+var (
+    ErrInvalid        = errors.New("invalid")
+    ErrNotImplemented = errors.New("not implemented")
+    ErrType           = errors.New("type error")
+    ErrKey            = errors.New("key error")
+    ErrIndex          = errors.New("index error")
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go b/vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go
new file mode 100644
index 00000000..c46a3a1a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package float16
+
+import (
+    "math"
+    "strconv"
+)
+
+// Num represents a half-precision floating point value (float16)
+// stored in 16 bits.
+//
+// See https://en.wikipedia.org/wiki/Half-precision_floating-point_format for more information.
+type Num struct {
+    bits uint16
+}
+
+// New creates a new half-precision floating point value from the provided
+// float32 value.
+func New(f float32) Num { + b := math.Float32bits(f) + sn := uint16((b >> 31) & 0x1) + exp := (b >> 23) & 0xff + res := int16(exp) - 127 + 15 + fc := uint16(b>>13) & 0x3ff + switch { + case exp == 0: + res = 0 + case exp == 0xff: + res = 0x1f + case res > 0x1e: + res = 0x1f + fc = 0 + case res < 0x01: + res = 0 + fc = 0 + } + return Num{bits: (sn << 15) | uint16(res<<10) | fc} +} + +func (f Num) Float32() float32 { + sn := uint32((f.bits >> 15) & 0x1) + exp := (f.bits >> 10) & 0x1f + res := uint32(exp) + 127 - 15 + fc := uint32(f.bits & 0x3ff) + switch { + case exp == 0: + res = 0 + case exp == 0x1f: + res = 0xff + } + return math.Float32frombits((sn << 31) | (res << 23) | (fc << 13)) +} + +func (f Num) Uint16() uint16 { return f.bits } +func (f Num) String() string { return strconv.FormatFloat(float64(f.Float32()), 'g', -1, 32) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_off.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_off.go new file mode 100644 index 00000000..52b9a233 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_off.go @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !assert + +package debug + +// Assert will panic with msg if cond is false. +// +// msg must be a string, func() string or fmt.Stringer. +func Assert(cond bool, msg interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_on.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_on.go new file mode 100644 index 00000000..2aa5d6ac --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_on.go @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build assert + +package debug + +// Assert will panic with msg if cond is false. +// +// msg must be a string, func() string or fmt.Stringer. 
+func Assert(cond bool, msg interface{}) { + if !cond { + panic(getStringValue(msg)) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/doc.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/doc.go new file mode 100644 index 00000000..3ee1783c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/doc.go @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package debug provides APIs for conditional runtime assertions and debug logging. + + +Using Assert + +To enable runtime assertions, build with the assert tag. When the assert tag is omitted, +the code for the assertion will be omitted from the binary. + + +Using Log + +To enable runtime debug logs, build with the debug tag. When the debug tag is omitted, +the code for logging will be omitted from the binary. +*/ +package debug diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_off.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_off.go new file mode 100644 index 00000000..48da8e1e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_off.go @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !debug + +package debug + +func Log(interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_on.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_on.go new file mode 100644 index 00000000..99d0c8ae --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_on.go @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build debug
+
+package debug
+
+import (
+    "log"
+    "os"
+)
+
+var (
+    debug = log.New(os.Stderr, "[D] ", log.LstdFlags)
+)
+
+func Log(msg interface{}) {
+    debug.Output(1, getStringValue(msg))
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/util.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/util.go
new file mode 100644
index 00000000..7bd3d538
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/util.go
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build debug assert
+
+package debug
+
+import "fmt"
+
+func getStringValue(v interface{}) string {
+    switch a := v.(type) {
+    case func() string:
+        return a()
+
+    case string:
+        return a
+
+    case fmt.Stringer:
+        return a.String()
+
+    default:
+        panic(fmt.Sprintf("unexpected type, %T", v))
+    }
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go
new file mode 100644
index 00000000..fb986d74
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go
@@ -0,0 +1,406 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package dictutils + +import ( + "errors" + "fmt" + "hash/maphash" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +type Kind int8 + +const ( + KindNew Kind = iota + KindDelta + KindReplacement +) + +type FieldPos struct { + parent *FieldPos + index, depth int32 +} + +func NewFieldPos() FieldPos { return FieldPos{index: -1} } + +func (f *FieldPos) Child(index int32) FieldPos { + return FieldPos{parent: f, index: index, depth: f.depth + 1} +} + +func (f *FieldPos) Path() []int32 { + path := make([]int32, f.depth) + cur := f + for i := f.depth - 1; i >= 0; i-- { + path[i] = int32(cur.index) + cur = cur.parent + } + return path +} + +type Mapper struct { + pathToID map[uint64]int64 + hasher maphash.Hash +} + +func (d *Mapper) NumDicts() int { + unique := make(map[int64]bool) + for _, id := range d.pathToID { + unique[id] = true + } + return len(unique) +} + +func (d *Mapper) AddField(id int64, fieldPath []int32) error { + d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath)) + defer d.hasher.Reset() + + sum := d.hasher.Sum64() + if _, ok := d.pathToID[sum]; ok { + return errors.New("field already mapped to id") + } + + d.pathToID[sum] = id + return nil +} + +func (d *Mapper) GetFieldID(fieldPath []int32) (int64, error) { + d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath)) + defer d.hasher.Reset() + + id, ok := d.pathToID[d.hasher.Sum64()] + if !ok { + return -1, errors.New("arrow/ipc: dictionary field not found") + } + return id, nil +} + +func (d *Mapper) NumFields() int { + return len(d.pathToID) +} + +func (d *Mapper) InsertPath(pos FieldPos) { + id := len(d.pathToID) + d.hasher.Write(arrow.Int32Traits.CastToBytes(pos.Path())) + + d.pathToID[d.hasher.Sum64()] = int64(id) + d.hasher.Reset() +} + +func (d *Mapper) ImportField(pos FieldPos, field *arrow.Field) { + dt := field.Type + if dt.ID() == arrow.EXTENSION { + dt = dt.(arrow.ExtensionType).StorageType() + } + + if dt.ID() == arrow.DICTIONARY { + d.InsertPath(pos) + // import nested dicts + if nested, ok := dt.(*arrow.DictionaryType).ValueType.(arrow.NestedType); ok { + d.ImportFields(pos, nested.Fields()) + } + return + } + + if nested, ok := dt.(arrow.NestedType); ok { + d.ImportFields(pos, nested.Fields()) + } +} + +func (d *Mapper) ImportFields(pos FieldPos, fields []arrow.Field) { + for i := range fields { + d.ImportField(pos.Child(int32(i)), &fields[i]) + } +} + +func (d *Mapper) ImportSchema(schema *arrow.Schema) { + d.pathToID = make(map[uint64]int64) + d.ImportFields(NewFieldPos(), schema.Fields()) +} + +func hasUnresolvedNestedDict(data arrow.ArrayData) bool { + d := data.(*array.Data) + if d.DataType().ID() == arrow.DICTIONARY { + if d.Dictionary().(*array.Data) == nil { + return true + } + if hasUnresolvedNestedDict(d.Dictionary()) { + return true + } + } + for _, c := range d.Children() { + if hasUnresolvedNestedDict(c) { + return true + } + } + return false +} + +type dictpair struct { + ID int64 + Dict arrow.Array +} + +type dictCollector struct { + dictionaries []dictpair + mapper *Mapper +} + +func (d *dictCollector) visitChildren(pos FieldPos, typ arrow.DataType, arr arrow.Array) error { + for i, c := range arr.Data().Children() { + child := array.MakeFromData(c) + defer child.Release() + if err := d.visit(pos.Child(int32(i)), child); err != nil { + return err + } + } + return nil +} + +func (d *dictCollector) visit(pos FieldPos, arr arrow.Array) error { + dt := arr.DataType() + if dt.ID() == arrow.EXTENSION { + dt 
= dt.(arrow.ExtensionType).StorageType() + arr = arr.(array.ExtensionArray).Storage() + } + + if dt.ID() == arrow.DICTIONARY { + dictarr := arr.(*array.Dictionary) + dict := dictarr.Dictionary() + + // traverse the dictionary to first gather any nested dictionaries + // so they appear in the output before their respective parents + dictType := dt.(*arrow.DictionaryType) + d.visitChildren(pos, dictType.ValueType, dict) + + id, err := d.mapper.GetFieldID(pos.Path()) + if err != nil { + return err + } + dict.Retain() + d.dictionaries = append(d.dictionaries, dictpair{ID: id, Dict: dict}) + return nil + } + return d.visitChildren(pos, dt, arr) +} + +func (d *dictCollector) collect(batch arrow.Record) error { + var ( + pos = NewFieldPos() + schema = batch.Schema() + ) + d.dictionaries = make([]dictpair, 0, d.mapper.NumFields()) + for i := range schema.Fields() { + if err := d.visit(pos.Child(int32(i)), batch.Column(i)); err != nil { + return err + } + } + return nil +} + +type dictMap map[int64][]arrow.ArrayData +type dictTypeMap map[int64]arrow.DataType + +type Memo struct { + Mapper Mapper + dict2id map[arrow.ArrayData]int64 + + id2type dictTypeMap + id2dict dictMap // map of dictionary ID to dictionary array +} + +func NewMemo() Memo { + return Memo{ + dict2id: make(map[arrow.ArrayData]int64), + id2dict: make(dictMap), + id2type: make(dictTypeMap), + Mapper: Mapper{ + pathToID: make(map[uint64]int64), + }, + } +} + +func (memo *Memo) Len() int { return len(memo.id2dict) } + +func (memo *Memo) Clear() { + for id, v := range memo.id2dict { + delete(memo.id2dict, id) + for _, d := range v { + delete(memo.dict2id, d) + d.Release() + } + } +} + +func (memo *Memo) reify(id int64, mem memory.Allocator) (arrow.ArrayData, error) { + v, ok := memo.id2dict[id] + if !ok { + return nil, fmt.Errorf("arrow/ipc: no dictionaries found for id=%d", id) + } + + if len(v) == 1 { + return v[0], nil + } + + // there are deltas we need to concatenate them with the first dictionary + toCombine := make([]arrow.Array, 0, len(v)) + // NOTE: at this point the dictionary data may not be trusted. it needs to + // be validated as concatenation can crash on invalid or corrupted data. 
+ for _, data := range v { + if hasUnresolvedNestedDict(data) { + return nil, fmt.Errorf("arrow/ipc: delta dict with unresolved nested dictionary not implemented") + } + arr := array.MakeFromData(data) + defer arr.Release() + + toCombine = append(toCombine, arr) + defer data.Release() + } + + combined, err := array.Concatenate(toCombine, mem) + if err != nil { + return nil, err + } + defer combined.Release() + combined.Data().Retain() + + memo.id2dict[id] = []arrow.ArrayData{combined.Data()} + return combined.Data(), nil +} + +func (memo *Memo) Dict(id int64, mem memory.Allocator) (arrow.ArrayData, error) { + return memo.reify(id, mem) +} + +func (memo *Memo) AddType(id int64, typ arrow.DataType) error { + if existing, dup := memo.id2type[id]; dup && !arrow.TypeEqual(existing, typ) { + return fmt.Errorf("arrow/ipc: conflicting dictionary types for id %d", id) + } + + memo.id2type[id] = typ + return nil +} + +func (memo *Memo) Type(id int64) (arrow.DataType, bool) { + t, ok := memo.id2type[id] + return t, ok +} + +// func (memo *dictMemo) ID(v arrow.Array) int64 { +// id, ok := memo.dict2id[v] +// if ok { +// return id +// } + +// v.Retain() +// id = int64(len(memo.dict2id)) +// memo.dict2id[v] = id +// memo.id2dict[id] = v +// return id +// } + +func (memo Memo) HasDict(v arrow.ArrayData) bool { + _, ok := memo.dict2id[v] + return ok +} + +func (memo Memo) HasID(id int64) bool { + _, ok := memo.id2dict[id] + return ok +} + +func (memo *Memo) Add(id int64, v arrow.ArrayData) { + if _, dup := memo.id2dict[id]; dup { + panic(fmt.Errorf("arrow/ipc: duplicate id=%d", id)) + } + v.Retain() + memo.id2dict[id] = []arrow.ArrayData{v} + memo.dict2id[v] = id +} + +func (memo *Memo) AddDelta(id int64, v arrow.ArrayData) { + d, ok := memo.id2dict[id] + if !ok { + panic(fmt.Errorf("arrow/ipc: adding delta to non-existing id=%d", id)) + } + v.Retain() + memo.id2dict[id] = append(d, v) +} + +// AddOrReplace puts the provided dictionary into the memo table. If it +// already exists, then the new data will replace it. Otherwise it is added +// to the memo table. 
+func (memo *Memo) AddOrReplace(id int64, v arrow.ArrayData) bool { + d, ok := memo.id2dict[id] + if ok { + // replace the dictionary and release any existing ones + for _, dict := range d { + dict.Release() + } + d[0] = v + d = d[:1] + } else { + d = []arrow.ArrayData{v} + } + v.Retain() + memo.id2dict[id] = d + return !ok +} + +func CollectDictionaries(batch arrow.Record, mapper *Mapper) (out []dictpair, err error) { + collector := dictCollector{mapper: mapper} + err = collector.collect(batch) + out = collector.dictionaries + return +} + +func ResolveFieldDict(memo *Memo, data arrow.ArrayData, pos FieldPos, mem memory.Allocator) error { + typ := data.DataType() + if typ.ID() == arrow.EXTENSION { + typ = typ.(arrow.ExtensionType).StorageType() + } + if typ.ID() == arrow.DICTIONARY { + id, err := memo.Mapper.GetFieldID(pos.Path()) + if err != nil { + return err + } + dictData, err := memo.Dict(id, mem) + if err != nil { + return err + } + data.(*array.Data).SetDictionary(dictData) + if err := ResolveFieldDict(memo, dictData, pos, mem); err != nil { + return err + } + } + return ResolveDictionaries(memo, data.Children(), pos, mem) +} + +func ResolveDictionaries(memo *Memo, cols []arrow.ArrayData, parentPos FieldPos, mem memory.Allocator) error { + for i, c := range cols { + if c == nil { + continue + } + if err := ResolveFieldDict(memo, c, parentPos.Child(int32(i)), mem); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Binary.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Binary.go new file mode 100644 index 00000000..e8018e74 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Binary.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Opaque binary data +type Binary struct { + _tab flatbuffers.Table +} + +func GetRootAsBinary(buf []byte, offset flatbuffers.UOffsetT) *Binary { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Binary{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Binary) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Binary) Table() flatbuffers.Table { + return rcv._tab +} + +func BinaryStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func BinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Block.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Block.go new file mode 100644 index 00000000..57a697b1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Block.go @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Block struct { + _tab flatbuffers.Struct +} + +func (rcv *Block) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Block) Table() flatbuffers.Table { + return rcv._tab.Table +} + +/// Index to the start of the RecordBlock (note this is past the Message header) +func (rcv *Block) Offset() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) +} +/// Index to the start of the RecordBlock (note this is past the Message header) +func (rcv *Block) MutateOffset(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) +} + +/// Length of the metadata +func (rcv *Block) MetaDataLength() int32 { + return rcv._tab.GetInt32(rcv._tab.Pos + flatbuffers.UOffsetT(8)) +} +/// Length of the metadata +func (rcv *Block) MutateMetaDataLength(n int32) bool { + return rcv._tab.MutateInt32(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) +} + +/// Length of the data (this is aligned so there can be a gap between this and +/// the metadata). +func (rcv *Block) BodyLength() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(16)) +} +/// Length of the data (this is aligned so there can be a gap between this and +/// the metadata). 
+func (rcv *Block) MutateBodyLength(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n) +} + +func CreateBlock(builder *flatbuffers.Builder, offset int64, metaDataLength int32, bodyLength int64) flatbuffers.UOffsetT { + builder.Prep(8, 24) + builder.PrependInt64(bodyLength) + builder.Pad(4) + builder.PrependInt32(metaDataLength) + builder.PrependInt64(offset) + return builder.Offset() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompression.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompression.go new file mode 100644 index 00000000..6468e231 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompression.go @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Optional compression for the memory buffers constituting IPC message +/// bodies. Intended for use with RecordBatch but could be used for other +/// message types +type BodyCompression struct { + _tab flatbuffers.Table +} + +func GetRootAsBodyCompression(buf []byte, offset flatbuffers.UOffsetT) *BodyCompression { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &BodyCompression{} + x.Init(buf, n+offset) + return x +} + +func (rcv *BodyCompression) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *BodyCompression) Table() flatbuffers.Table { + return rcv._tab +} + +/// Compressor library. +/// For LZ4_FRAME, each compressed buffer must consist of a single frame. +func (rcv *BodyCompression) Codec() CompressionType { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return CompressionType(rcv._tab.GetInt8(o + rcv._tab.Pos)) + } + return 0 +} + +/// Compressor library. +/// For LZ4_FRAME, each compressed buffer must consist of a single frame. 
+func (rcv *BodyCompression) MutateCodec(n CompressionType) bool { + return rcv._tab.MutateInt8Slot(4, int8(n)) +} + +/// Indicates the way the record batch body was compressed +func (rcv *BodyCompression) Method() BodyCompressionMethod { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return BodyCompressionMethod(rcv._tab.GetInt8(o + rcv._tab.Pos)) + } + return 0 +} + +/// Indicates the way the record batch body was compressed +func (rcv *BodyCompression) MutateMethod(n BodyCompressionMethod) bool { + return rcv._tab.MutateInt8Slot(6, int8(n)) +} + +func BodyCompressionStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func BodyCompressionAddCodec(builder *flatbuffers.Builder, codec CompressionType) { + builder.PrependInt8Slot(0, int8(codec), 0) +} +func BodyCompressionAddMethod(builder *flatbuffers.Builder, method BodyCompressionMethod) { + builder.PrependInt8Slot(1, int8(method), 0) +} +func BodyCompressionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompressionMethod.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompressionMethod.go new file mode 100644 index 00000000..108ab3e0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompressionMethod.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// Provided for forward compatibility in case we need to support different +/// strategies for compressing the IPC message body (like whole-body +/// compression rather than buffer-level) in the future +type BodyCompressionMethod int8 + +const ( + /// Each constituent buffer is first compressed with the indicated + /// compressor, and then written with the uncompressed length in the first 8 + /// bytes as a 64-bit little-endian signed integer followed by the compressed + /// buffer bytes (and then padding as required by the protocol). The + /// uncompressed length may be set to -1 to indicate that the data that + /// follows is not compressed, which can be useful for cases where + /// compression does not yield appreciable savings. 
+ BodyCompressionMethodBUFFER BodyCompressionMethod = 0 +) + +var EnumNamesBodyCompressionMethod = map[BodyCompressionMethod]string{ + BodyCompressionMethodBUFFER: "BUFFER", +} + +var EnumValuesBodyCompressionMethod = map[string]BodyCompressionMethod{ + "BUFFER": BodyCompressionMethodBUFFER, +} + +func (v BodyCompressionMethod) String() string { + if s, ok := EnumNamesBodyCompressionMethod[v]; ok { + return s + } + return "BodyCompressionMethod(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Bool.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Bool.go new file mode 100644 index 00000000..6a4a9d26 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Bool.go @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Bool struct { + _tab flatbuffers.Table +} + +func GetRootAsBool(buf []byte, offset flatbuffers.UOffsetT) *Bool { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Bool{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Bool) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Bool) Table() flatbuffers.Table { + return rcv._tab +} + +func BoolStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func BoolEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Buffer.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Buffer.go new file mode 100644 index 00000000..eba8d99b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Buffer.go @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// A Buffer represents a single contiguous memory segment +type Buffer struct { + _tab flatbuffers.Struct +} + +func (rcv *Buffer) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Buffer) Table() flatbuffers.Table { + return rcv._tab.Table +} + +/// The relative offset into the shared memory page where the bytes for this +/// buffer starts +func (rcv *Buffer) Offset() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) +} +/// The relative offset into the shared memory page where the bytes for this +/// buffer starts +func (rcv *Buffer) MutateOffset(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) +} + +/// The absolute length (in bytes) of the memory buffer. The memory is found +/// from offset (inclusive) to offset + length (non-inclusive). When building +/// messages using the encapsulated IPC message, padding bytes may be written +/// after a buffer, but such padding bytes do not need to be accounted for in +/// the size here. +func (rcv *Buffer) Length() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) +} +/// The absolute length (in bytes) of the memory buffer. The memory is found +/// from offset (inclusive) to offset + length (non-inclusive). When building +/// messages using the encapsulated IPC message, padding bytes may be written +/// after a buffer, but such padding bytes do not need to be accounted for in +/// the size here. +func (rcv *Buffer) MutateLength(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) +} + +func CreateBuffer(builder *flatbuffers.Builder, offset int64, length int64) flatbuffers.UOffsetT { + builder.Prep(8, 16) + builder.PrependInt64(length) + builder.PrependInt64(offset) + return builder.Offset() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/CompressionType.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/CompressionType.go new file mode 100644 index 00000000..96e9df07 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/CompressionType.go @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import "strconv" + +type CompressionType int8 + +const ( + CompressionTypeLZ4_FRAME CompressionType = 0 + CompressionTypeZSTD CompressionType = 1 +) + +var EnumNamesCompressionType = map[CompressionType]string{ + CompressionTypeLZ4_FRAME: "LZ4_FRAME", + CompressionTypeZSTD: "ZSTD", +} + +var EnumValuesCompressionType = map[string]CompressionType{ + "LZ4_FRAME": CompressionTypeLZ4_FRAME, + "ZSTD": CompressionTypeZSTD, +} + +func (v CompressionType) String() string { + if s, ok := EnumNamesCompressionType[v]; ok { + return s + } + return "CompressionType(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Date.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Date.go new file mode 100644 index 00000000..32983ec5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Date.go @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Date is either a 32-bit or 64-bit signed integer type representing an +/// elapsed time since UNIX epoch (1970-01-01), stored in either of two units: +/// +/// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no +/// leap seconds), where the values are evenly divisible by 86400000 +/// * Days (32 bits) since the UNIX epoch +type Date struct { + _tab flatbuffers.Table +} + +func GetRootAsDate(buf []byte, offset flatbuffers.UOffsetT) *Date { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Date{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Date) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Date) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Date) Unit() DateUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return DateUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 1 +} + +func (rcv *Date) MutateUnit(n DateUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func DateStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func DateAddUnit(builder *flatbuffers.Builder, unit DateUnit) { + builder.PrependInt16Slot(0, int16(unit), 1) +} +func DateEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DateUnit.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DateUnit.go new file mode 100644 index 00000000..8a12eec1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DateUnit.go @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +type DateUnit int16 + +const ( + DateUnitDAY DateUnit = 0 + DateUnitMILLISECOND DateUnit = 1 +) + +var EnumNamesDateUnit = map[DateUnit]string{ + DateUnitDAY: "DAY", + DateUnitMILLISECOND: "MILLISECOND", +} + +var EnumValuesDateUnit = map[string]DateUnit{ + "DAY": DateUnitDAY, + "MILLISECOND": DateUnitMILLISECOND, +} + +func (v DateUnit) String() string { + if s, ok := EnumNamesDateUnit[v]; ok { + return s + } + return "DateUnit(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Decimal.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Decimal.go new file mode 100644 index 00000000..c9de254d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Decimal.go @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Exact decimal value represented as an integer value in two's +/// complement. Currently only 128-bit (16-byte) and 256-bit (32-byte) integers +/// are used. The representation uses the endianness indicated +/// in the Schema. +type Decimal struct { + _tab flatbuffers.Table +} + +func GetRootAsDecimal(buf []byte, offset flatbuffers.UOffsetT) *Decimal { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Decimal{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Decimal) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Decimal) Table() flatbuffers.Table { + return rcv._tab +} + +/// Total number of decimal digits +func (rcv *Decimal) Precision() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Total number of decimal digits +func (rcv *Decimal) MutatePrecision(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +/// Number of digits after the decimal point "." +func (rcv *Decimal) Scale() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Number of digits after the decimal point "." +func (rcv *Decimal) MutateScale(n int32) bool { + return rcv._tab.MutateInt32Slot(6, n) +} + +/// Number of bits per value. The only accepted widths are 128 and 256. +/// We use bitWidth for consistency with Int::bitWidth. +func (rcv *Decimal) BitWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 128 +} + +/// Number of bits per value. The only accepted widths are 128 and 256. +/// We use bitWidth for consistency with Int::bitWidth. 
+func (rcv *Decimal) MutateBitWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(8, n) +} + +func DecimalStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func DecimalAddPrecision(builder *flatbuffers.Builder, precision int32) { + builder.PrependInt32Slot(0, precision, 0) +} +func DecimalAddScale(builder *flatbuffers.Builder, scale int32) { + builder.PrependInt32Slot(1, scale, 0) +} +func DecimalAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) { + builder.PrependInt32Slot(2, bitWidth, 128) +} +func DecimalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryBatch.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryBatch.go new file mode 100644 index 00000000..25b5384e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryBatch.go @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// For sending dictionary encoding information. Any Field can be +/// dictionary-encoded, but in this case none of its children may be +/// dictionary-encoded. +/// There is one vector / column per dictionary, but that vector / column +/// may be spread across multiple dictionary batches by using the isDelta +/// flag +type DictionaryBatch struct { + _tab flatbuffers.Table +} + +func GetRootAsDictionaryBatch(buf []byte, offset flatbuffers.UOffsetT) *DictionaryBatch { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &DictionaryBatch{} + x.Init(buf, n+offset) + return x +} + +func (rcv *DictionaryBatch) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *DictionaryBatch) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *DictionaryBatch) Id() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *DictionaryBatch) MutateId(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +func (rcv *DictionaryBatch) Data(obj *RecordBatch) *RecordBatch { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(RecordBatch) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// If isDelta is true the values in the dictionary are to be appended to a +/// dictionary with the indicated id. If isDelta is false this dictionary +/// should replace the existing dictionary. 
+func (rcv *DictionaryBatch) IsDelta() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// If isDelta is true the values in the dictionary are to be appended to a +/// dictionary with the indicated id. If isDelta is false this dictionary +/// should replace the existing dictionary. +func (rcv *DictionaryBatch) MutateIsDelta(n bool) bool { + return rcv._tab.MutateBoolSlot(8, n) +} + +func DictionaryBatchStart(builder *flatbuffers.Builder) { + builder.StartObject(3) +} +func DictionaryBatchAddId(builder *flatbuffers.Builder, id int64) { + builder.PrependInt64Slot(0, id, 0) +} +func DictionaryBatchAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(data), 0) +} +func DictionaryBatchAddIsDelta(builder *flatbuffers.Builder, isDelta bool) { + builder.PrependBoolSlot(2, isDelta, false) +} +func DictionaryBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryEncoding.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryEncoding.go new file mode 100644 index 00000000..a9b09530 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryEncoding.go @@ -0,0 +1,135 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type DictionaryEncoding struct { + _tab flatbuffers.Table +} + +func GetRootAsDictionaryEncoding(buf []byte, offset flatbuffers.UOffsetT) *DictionaryEncoding { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &DictionaryEncoding{} + x.Init(buf, n+offset) + return x +} + +func (rcv *DictionaryEncoding) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *DictionaryEncoding) Table() flatbuffers.Table { + return rcv._tab +} + +/// The known dictionary id in the application where this data is used. In +/// the file or streaming formats, the dictionary ids are found in the +/// DictionaryBatch messages +func (rcv *DictionaryEncoding) Id() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// The known dictionary id in the application where this data is used. In +/// the file or streaming formats, the dictionary ids are found in the +/// DictionaryBatch messages +func (rcv *DictionaryEncoding) MutateId(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +/// The dictionary indices are constrained to be non-negative integers. 
If +/// this field is null, the indices must be signed int32. To maximize +/// cross-language compatibility and performance, implementations are +/// recommended to prefer signed integer types over unsigned integer types +/// and to avoid uint64 indices unless they are required by an application. +func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The dictionary indices are constrained to be non-negative integers. If +/// this field is null, the indices must be signed int32. To maximize +/// cross-language compatibility and performance, implementations are +/// recommended to prefer signed integer types over unsigned integer types +/// and to avoid uint64 indices unless they are required by an application. +/// By default, dictionaries are not ordered, or the order does not have +/// semantic meaning. In some statistical applications, dictionary-encoding +/// is used to represent ordered categorical data, and we provide a way to +/// preserve that metadata here +func (rcv *DictionaryEncoding) IsOrdered() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// By default, dictionaries are not ordered, or the order does not have +/// semantic meaning. In some statistical applications, dictionary-encoding +/// is used to represent ordered categorical data, and we provide a way to +/// preserve that metadata here +func (rcv *DictionaryEncoding) MutateIsOrdered(n bool) bool { + return rcv._tab.MutateBoolSlot(8, n) +} + +func (rcv *DictionaryEncoding) DictionaryKind() DictionaryKind { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return DictionaryKind(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *DictionaryEncoding) MutateDictionaryKind(n DictionaryKind) bool { + return rcv._tab.MutateInt16Slot(10, int16(n)) +} + +func DictionaryEncodingStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func DictionaryEncodingAddId(builder *flatbuffers.Builder, id int64) { + builder.PrependInt64Slot(0, id, 0) +} +func DictionaryEncodingAddIndexType(builder *flatbuffers.Builder, indexType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indexType), 0) +} +func DictionaryEncodingAddIsOrdered(builder *flatbuffers.Builder, isOrdered bool) { + builder.PrependBoolSlot(2, isOrdered, false) +} +func DictionaryEncodingAddDictionaryKind(builder *flatbuffers.Builder, dictionaryKind DictionaryKind) { + builder.PrependInt16Slot(3, int16(dictionaryKind), 0) +} +func DictionaryEncodingEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryKind.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryKind.go new file mode 100644 index 00000000..126ba5f7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryKind.go @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// ---------------------------------------------------------------------- +/// Dictionary encoding metadata +/// Maintained for forwards compatibility, in the future +/// Dictionaries might be explicit maps between integers and values +/// allowing for non-contiguous index values +type DictionaryKind int16 + +const ( + DictionaryKindDenseArray DictionaryKind = 0 +) + +var EnumNamesDictionaryKind = map[DictionaryKind]string{ + DictionaryKindDenseArray: "DenseArray", +} + +var EnumValuesDictionaryKind = map[string]DictionaryKind{ + "DenseArray": DictionaryKindDenseArray, +} + +func (v DictionaryKind) String() string { + if s, ok := EnumNamesDictionaryKind[v]; ok { + return s + } + return "DictionaryKind(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Duration.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Duration.go new file mode 100644 index 00000000..57b7b2a0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Duration.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Duration struct { + _tab flatbuffers.Table +} + +func GetRootAsDuration(buf []byte, offset flatbuffers.UOffsetT) *Duration { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Duration{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Duration) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Duration) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Duration) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 1 +} + +func (rcv *Duration) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func DurationStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func DurationAddUnit(builder *flatbuffers.Builder, unit TimeUnit) { + builder.PrependInt16Slot(0, int16(unit), 1) +} +func DurationEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Endianness.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Endianness.go new file mode 100644 index 00000000..cefa2ff9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Endianness.go @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// ---------------------------------------------------------------------- +/// Endianness of the platform producing the data +type Endianness int16 + +const ( + EndiannessLittle Endianness = 0 + EndiannessBig Endianness = 1 +) + +var EnumNamesEndianness = map[Endianness]string{ + EndiannessLittle: "Little", + EndiannessBig: "Big", +} + +var EnumValuesEndianness = map[string]Endianness{ + "Little": EndiannessLittle, + "Big": EndiannessBig, +} + +func (v Endianness) String() string { + if s, ok := EnumNamesEndianness[v]; ok { + return s + } + return "Endianness(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Feature.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Feature.go new file mode 100644 index 00000000..ae5a0398 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Feature.go @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// Represents Arrow Features that might not have full support +/// within implementations. This is intended to be used in +/// two scenarios: +/// 1. A mechanism for readers of Arrow Streams +/// and files to understand that the stream or file makes +/// use of a feature that isn't supported by or is unknown to +/// the implementation (and therefore can meet the Arrow +/// forward compatibility guarantees). +/// 2. A means of negotiating between a client and server +/// what features a stream is allowed to use. The enum +/// values here are intended to represent higher level +/// features, additional details may be negotiated +/// with key-value pairs specific to the protocol. +/// +/// Enums added to this list should be assigned power-of-two values +/// to facilitate exchanging and comparing bitmaps for supported +/// features. +type Feature int64 + +const ( + /// Needed to make flatbuffers happy. + FeatureUNUSED Feature = 0 + /// The stream makes use of multiple full dictionaries with the + /// same ID and assumes clients implement dictionary replacement + /// correctly. + FeatureDICTIONARY_REPLACEMENT Feature = 1 + /// The stream makes use of compressed bodies as described + /// in Message.fbs. + FeatureCOMPRESSED_BODY Feature = 2 +) + +var EnumNamesFeature = map[Feature]string{ + FeatureUNUSED: "UNUSED", + FeatureDICTIONARY_REPLACEMENT: "DICTIONARY_REPLACEMENT", + FeatureCOMPRESSED_BODY: "COMPRESSED_BODY", +} + +var EnumValuesFeature = map[string]Feature{ + "UNUSED": FeatureUNUSED, + "DICTIONARY_REPLACEMENT": FeatureDICTIONARY_REPLACEMENT, + "COMPRESSED_BODY": FeatureCOMPRESSED_BODY, +} + +func (v Feature) String() string { + if s, ok := EnumNamesFeature[v]; ok { + return s + } + return "Feature(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Field.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Field.go new file mode 100644 index 00000000..c03cf2f8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Field.go @@ -0,0 +1,188 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// A field represents a named column in a record / row batch or child of a +/// nested type. +type Field struct { + _tab flatbuffers.Table +} + +func GetRootAsField(buf []byte, offset flatbuffers.UOffsetT) *Field { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Field{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Field) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Field) Table() flatbuffers.Table { + return rcv._tab +} + +/// Name is not required, e.g. in a List +func (rcv *Field) Name() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +/// Name is not required, e.g. in a List +/// Whether or not this field can contain nulls. Should be true in general. +func (rcv *Field) Nullable() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// Whether or not this field can contain nulls. Should be true in general. +func (rcv *Field) MutateNullable(n bool) bool { + return rcv._tab.MutateBoolSlot(6, n) +} + +func (rcv *Field) TypeType() Type { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return Type(rcv._tab.GetByte(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Field) MutateTypeType(n Type) bool { + return rcv._tab.MutateByteSlot(8, byte(n)) +} + +/// This is the type of the decoded value if the field is dictionary encoded. +func (rcv *Field) Type(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// This is the type of the decoded value if the field is dictionary encoded. +/// Present only if the field is dictionary encoded. +func (rcv *Field) Dictionary(obj *DictionaryEncoding) *DictionaryEncoding { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(DictionaryEncoding) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// Present only if the field is dictionary encoded. +/// children apply only to nested data types like Struct, List and Union. For +/// primitive types children will have length 0. +func (rcv *Field) Children(obj *Field, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Field) ChildrenLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// children apply only to nested data types like Struct, List and Union. For +/// primitive types children will have length 0.
+/// User-defined metadata +func (rcv *Field) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Field) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// User-defined metadata +func FieldStart(builder *flatbuffers.Builder) { + builder.StartObject(7) +} +func FieldAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0) +} +func FieldAddNullable(builder *flatbuffers.Builder, nullable bool) { + builder.PrependBoolSlot(1, nullable, false) +} +func FieldAddTypeType(builder *flatbuffers.Builder, typeType Type) { + builder.PrependByteSlot(2, byte(typeType), 0) +} +func FieldAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(type_), 0) +} +func FieldAddDictionary(builder *flatbuffers.Builder, dictionary flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(dictionary), 0) +} +func FieldAddChildren(builder *flatbuffers.Builder, children flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(children), 0) +} +func FieldStartChildrenVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func FieldAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(customMetadata), 0) +} +func FieldStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func FieldEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FieldNode.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FieldNode.go new file mode 100644 index 00000000..606b30bf --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FieldNode.go @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// Data structures for describing a table row batch (a collection of +/// equal-length Arrow arrays) +/// Metadata about a field at some level of a nested type tree (but not +/// its children). 
+/// +/// For example, a List<Int16> with values `[[1, 2, 3], null, [4], [5, 6], null]` +/// would have {length: 5, null_count: 2} for its List node, and {length: 6, +/// null_count: 0} for its Int16 node, as separate FieldNode structs +type FieldNode struct { + _tab flatbuffers.Struct +} + +func (rcv *FieldNode) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FieldNode) Table() flatbuffers.Table { + return rcv._tab.Table +} + +/// The number of value slots in the Arrow array at this level of a nested +/// tree +func (rcv *FieldNode) Length() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) +} +/// The number of value slots in the Arrow array at this level of a nested +/// tree +func (rcv *FieldNode) MutateLength(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) +} + +/// The number of observed nulls. Fields with null_count == 0 may choose not +/// to write their physical validity bitmap out as a materialized buffer, +/// instead setting the length of the bitmap buffer to 0. +func (rcv *FieldNode) NullCount() int64 { + return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) +} +/// The number of observed nulls. Fields with null_count == 0 may choose not +/// to write their physical validity bitmap out as a materialized buffer, +/// instead setting the length of the bitmap buffer to 0. +func (rcv *FieldNode) MutateNullCount(n int64) bool { + return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) +} + +func CreateFieldNode(builder *flatbuffers.Builder, length int64, nullCount int64) flatbuffers.UOffsetT { + builder.Prep(8, 16) + builder.PrependInt64(nullCount) + builder.PrependInt64(length) + return builder.Offset() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeBinary.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeBinary.go new file mode 100644 index 00000000..4e660d50 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeBinary.go @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FixedSizeBinary struct { + _tab flatbuffers.Table +} + +func GetRootAsFixedSizeBinary(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeBinary { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FixedSizeBinary{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FixedSizeBinary) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FixedSizeBinary) Table() flatbuffers.Table { + return rcv._tab +} + +/// Number of bytes per value +func (rcv *FixedSizeBinary) ByteWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Number of bytes per value +func (rcv *FixedSizeBinary) MutateByteWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func FixedSizeBinaryStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FixedSizeBinaryAddByteWidth(builder *flatbuffers.Builder, byteWidth int32) { + builder.PrependInt32Slot(0, byteWidth, 0) +} +func FixedSizeBinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeList.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeList.go new file mode 100644 index 00000000..dabf5cc8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeList.go @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FixedSizeList struct { + _tab flatbuffers.Table +} + +func GetRootAsFixedSizeList(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeList { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FixedSizeList{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FixedSizeList) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FixedSizeList) Table() flatbuffers.Table { + return rcv._tab +} + +/// Number of list items per value +func (rcv *FixedSizeList) ListSize() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +/// Number of list items per value +func (rcv *FixedSizeList) MutateListSize(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func FixedSizeListStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FixedSizeListAddListSize(builder *flatbuffers.Builder, listSize int32) { + builder.PrependInt32Slot(0, listSize, 0) +} +func FixedSizeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FloatingPoint.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FloatingPoint.go new file mode 100644 index 00000000..241d448d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FloatingPoint.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type FloatingPoint struct { + _tab flatbuffers.Table +} + +func GetRootAsFloatingPoint(buf []byte, offset flatbuffers.UOffsetT) *FloatingPoint { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &FloatingPoint{} + x.Init(buf, n+offset) + return x +} + +func (rcv *FloatingPoint) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *FloatingPoint) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *FloatingPoint) Precision() Precision { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return Precision(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *FloatingPoint) MutatePrecision(n Precision) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func FloatingPointStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func FloatingPointAddPrecision(builder *flatbuffers.Builder, precision Precision) { + builder.PrependInt16Slot(0, int16(precision), 0) +} +func FloatingPointEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Footer.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Footer.go new file mode 100644 index 00000000..65b0ff09 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Footer.go @@ -0,0 +1,162 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// Arrow File metadata +/// +type Footer struct { + _tab flatbuffers.Table +} + +func GetRootAsFooter(buf []byte, offset flatbuffers.UOffsetT) *Footer { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Footer{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Footer) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Footer) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Footer) Version() MetadataVersion { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return MetadataVersion(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Footer) MutateVersion(n MetadataVersion) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func (rcv *Footer) Schema(obj *Schema) *Schema { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Schema) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +func (rcv *Footer) Dictionaries(obj *Block, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 24 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Footer) DictionariesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Footer) RecordBatches(obj *Block, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 24 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Footer) RecordBatchesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// User-defined metadata +func (rcv *Footer) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Footer) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// User-defined metadata +func FooterStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func FooterAddVersion(builder *flatbuffers.Builder, version MetadataVersion) { + builder.PrependInt16Slot(0, int16(version), 0) +} +func FooterAddSchema(builder *flatbuffers.Builder, schema flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(schema), 0) +} +func FooterAddDictionaries(builder *flatbuffers.Builder, dictionaries flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(dictionaries), 0) +} +func FooterStartDictionariesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(24, numElems, 8) +} +func FooterAddRecordBatches(builder *flatbuffers.Builder, recordBatches flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(recordBatches), 0) +} +func FooterStartRecordBatchesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(24, numElems, 8) +} +func FooterAddCustomMetadata(builder 
*flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(customMetadata), 0) +} +func FooterStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func FooterEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Int.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Int.go new file mode 100644 index 00000000..9f4b1911 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Int.go @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Int struct { + _tab flatbuffers.Table +} + +func GetRootAsInt(buf []byte, offset flatbuffers.UOffsetT) *Int { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Int{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Int) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Int) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Int) BitWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Int) MutateBitWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(4, n) +} + +func (rcv *Int) IsSigned() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *Int) MutateIsSigned(n bool) bool { + return rcv._tab.MutateBoolSlot(6, n) +} + +func IntStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func IntAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) { + builder.PrependInt32Slot(0, bitWidth, 0) +} +func IntAddIsSigned(builder *flatbuffers.Builder, isSigned bool) { + builder.PrependBoolSlot(1, isSigned, false) +} +func IntEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Interval.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Interval.go new file mode 100644 index 00000000..12c56d5c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Interval.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Interval struct { + _tab flatbuffers.Table +} + +func GetRootAsInterval(buf []byte, offset flatbuffers.UOffsetT) *Interval { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Interval{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Interval) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Interval) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Interval) Unit() IntervalUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return IntervalUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Interval) MutateUnit(n IntervalUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func IntervalStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func IntervalAddUnit(builder *flatbuffers.Builder, unit IntervalUnit) { + builder.PrependInt16Slot(0, int16(unit), 0) +} +func IntervalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/IntervalUnit.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/IntervalUnit.go new file mode 100644 index 00000000..f3ed1ae7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/IntervalUnit.go @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import "strconv" + +type IntervalUnit int16 + +const ( + IntervalUnitYEAR_MONTH IntervalUnit = 0 + IntervalUnitDAY_TIME IntervalUnit = 1 + IntervalUnitMONTH_DAY_NANO IntervalUnit = 2 +) + +var EnumNamesIntervalUnit = map[IntervalUnit]string{ + IntervalUnitYEAR_MONTH: "YEAR_MONTH", + IntervalUnitDAY_TIME: "DAY_TIME", + IntervalUnitMONTH_DAY_NANO: "MONTH_DAY_NANO", +} + +var EnumValuesIntervalUnit = map[string]IntervalUnit{ + "YEAR_MONTH": IntervalUnitYEAR_MONTH, + "DAY_TIME": IntervalUnitDAY_TIME, + "MONTH_DAY_NANO": IntervalUnitMONTH_DAY_NANO, +} + +func (v IntervalUnit) String() string { + if s, ok := EnumNamesIntervalUnit[v]; ok { + return s + } + return "IntervalUnit(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/KeyValue.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/KeyValue.go new file mode 100644 index 00000000..c1b85318 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/KeyValue.go @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// user defined key value pairs to add custom metadata to arrow +/// key namespacing is the responsibility of the user +type KeyValue struct { + _tab flatbuffers.Table +} + +func GetRootAsKeyValue(buf []byte, offset flatbuffers.UOffsetT) *KeyValue { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &KeyValue{} + x.Init(buf, n+offset) + return x +} + +func (rcv *KeyValue) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *KeyValue) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *KeyValue) Key() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func (rcv *KeyValue) Value() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +func KeyValueStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func KeyValueAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) +} +func KeyValueAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) +} +func KeyValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeBinary.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeBinary.go new file mode 100644 index 00000000..2c3befcc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeBinary.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Same as Binary, but with 64-bit offsets, allowing to represent +/// extremely large data values. 
+type LargeBinary struct { + _tab flatbuffers.Table +} + +func GetRootAsLargeBinary(buf []byte, offset flatbuffers.UOffsetT) *LargeBinary { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LargeBinary{} + x.Init(buf, n+offset) + return x +} + +func (rcv *LargeBinary) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LargeBinary) Table() flatbuffers.Table { + return rcv._tab +} + +func LargeBinaryStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func LargeBinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeList.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeList.go new file mode 100644 index 00000000..92f22845 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeList.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Same as List, but with 64-bit offsets, allowing to represent +/// extremely large data values. +type LargeList struct { + _tab flatbuffers.Table +} + +func GetRootAsLargeList(buf []byte, offset flatbuffers.UOffsetT) *LargeList { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LargeList{} + x.Init(buf, n+offset) + return x +} + +func (rcv *LargeList) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LargeList) Table() flatbuffers.Table { + return rcv._tab +} + +func LargeListStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func LargeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeUtf8.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeUtf8.go new file mode 100644 index 00000000..e78b33e1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeUtf8.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Same as Utf8, but with 64-bit offsets, allowing to represent +/// extremely large data values. +type LargeUtf8 struct { + _tab flatbuffers.Table +} + +func GetRootAsLargeUtf8(buf []byte, offset flatbuffers.UOffsetT) *LargeUtf8 { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LargeUtf8{} + x.Init(buf, n+offset) + return x +} + +func (rcv *LargeUtf8) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LargeUtf8) Table() flatbuffers.Table { + return rcv._tab +} + +func LargeUtf8Start(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func LargeUtf8End(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/List.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/List.go new file mode 100644 index 00000000..ba84319d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/List.go @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type List struct { + _tab flatbuffers.Table +} + +func GetRootAsList(buf []byte, offset flatbuffers.UOffsetT) *List { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &List{} + x.Init(buf, n+offset) + return x +} + +func (rcv *List) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *List) Table() flatbuffers.Table { + return rcv._tab +} + +func ListStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func ListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Map.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Map.go new file mode 100644 index 00000000..8802aba1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Map.go @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A Map is a logical nested type that is represented as +/// +/// List<entries: Struct<key: K, value: V>> +/// +/// In this layout, the keys and values are each respectively contiguous. We do +/// not constrain the key and value types, so the application is responsible +/// for ensuring that the keys are hashable and unique. Whether the keys are sorted +/// may be set in the metadata for this field. +/// +/// In a field with Map type, the field has a child Struct field, which then +/// has two children: the first the key type and the second the value type. The names of the +/// child fields may be respectively "entries", "key", and "value", but this is +/// not enforced. +/// +/// Map +/// ```text +/// - child[0] entries: Struct +/// - child[0] key: K +/// - child[1] value: V +/// ``` +/// Neither the "entries" field nor the "key" field may be nullable. +/// +/// The metadata is structured so that Arrow systems without special handling +/// for Map can make Map an alias for List. The "layout" attribute for the Map +/// field must have the same contents as a List. +type Map struct { + _tab flatbuffers.Table +} + +func GetRootAsMap(buf []byte, offset flatbuffers.UOffsetT) *Map { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Map{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Map) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Map) Table() flatbuffers.Table { + return rcv._tab +} + +/// Set to true if the keys within each value are sorted +func (rcv *Map) KeysSorted() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// Set to true if the keys within each value are sorted +func (rcv *Map) MutateKeysSorted(n bool) bool { + return rcv._tab.MutateBoolSlot(4, n) +} + +func MapStart(builder *flatbuffers.Builder) { + builder.StartObject(1) +} +func MapAddKeysSorted(builder *flatbuffers.Builder, keysSorted bool) { + builder.PrependBoolSlot(0, keysSorted, false) +} +func MapEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Message.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Message.go new file mode 100644 index 00000000..f4b4a0ff --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Message.go @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Message struct { + _tab flatbuffers.Table +} + +func GetRootAsMessage(buf []byte, offset flatbuffers.UOffsetT) *Message { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Message{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Message) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Message) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Message) Version() MetadataVersion { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return MetadataVersion(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Message) MutateVersion(n MetadataVersion) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func (rcv *Message) HeaderType() MessageHeader { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return MessageHeader(rcv._tab.GetByte(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Message) MutateHeaderType(n MessageHeader) bool { + return rcv._tab.MutateByteSlot(6, byte(n)) +} + +func (rcv *Message) Header(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +func (rcv *Message) BodyLength() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +func (rcv *Message) MutateBodyLength(n int64) bool { + return rcv._tab.MutateInt64Slot(10, n) +} + +func (rcv *Message) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Message) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func MessageStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func MessageAddVersion(builder *flatbuffers.Builder, version MetadataVersion) { + builder.PrependInt16Slot(0, int16(version), 0) +} +func MessageAddHeaderType(builder *flatbuffers.Builder, headerType MessageHeader) { + builder.PrependByteSlot(1, byte(headerType), 0) +} +func MessageAddHeader(builder *flatbuffers.Builder, header flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(header), 0) +} +func MessageAddBodyLength(builder *flatbuffers.Builder, bodyLength int64) { + builder.PrependInt64Slot(3, bodyLength, 0) +} +func MessageAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(customMetadata), 0) +} +func MessageStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) 
flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func MessageEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MessageHeader.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MessageHeader.go new file mode 100644 index 00000000..c12fc105 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MessageHeader.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// ---------------------------------------------------------------------- +/// The root Message type +/// This union enables us to easily send different message types without +/// redundant storage, and in the future we can easily add new message types. +/// +/// Arrow implementations do not need to implement all of the message types, +/// which may include experimental metadata types. For maximum compatibility, +/// it is best to send data using RecordBatch +type MessageHeader byte + +const ( + MessageHeaderNONE MessageHeader = 0 + MessageHeaderSchema MessageHeader = 1 + MessageHeaderDictionaryBatch MessageHeader = 2 + MessageHeaderRecordBatch MessageHeader = 3 + MessageHeaderTensor MessageHeader = 4 + MessageHeaderSparseTensor MessageHeader = 5 +) + +var EnumNamesMessageHeader = map[MessageHeader]string{ + MessageHeaderNONE: "NONE", + MessageHeaderSchema: "Schema", + MessageHeaderDictionaryBatch: "DictionaryBatch", + MessageHeaderRecordBatch: "RecordBatch", + MessageHeaderTensor: "Tensor", + MessageHeaderSparseTensor: "SparseTensor", +} + +var EnumValuesMessageHeader = map[string]MessageHeader{ + "NONE": MessageHeaderNONE, + "Schema": MessageHeaderSchema, + "DictionaryBatch": MessageHeaderDictionaryBatch, + "RecordBatch": MessageHeaderRecordBatch, + "Tensor": MessageHeaderTensor, + "SparseTensor": MessageHeaderSparseTensor, +} + +func (v MessageHeader) String() string { + if s, ok := EnumNamesMessageHeader[v]; ok { + return s + } + return "MessageHeader(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MetadataVersion.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MetadataVersion.go new file mode 100644 index 00000000..21b234f9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MetadataVersion.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +type MetadataVersion int16 + +const ( + /// 0.1.0 (October 2016). + MetadataVersionV1 MetadataVersion = 0 + /// 0.2.0 (February 2017). Non-backwards compatible with V1. + MetadataVersionV2 MetadataVersion = 1 + /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2. + MetadataVersionV3 MetadataVersion = 2 + /// >= 0.8.0 (December 2017). Non-backwards compatible with V3. + MetadataVersionV4 MetadataVersion = 3 + /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4 + /// metadata and IPC messages). Implementations are recommended to provide a + /// V4 compatibility mode with V5 format changes disabled. + /// + /// Incompatible changes between V4 and V5: + /// - Union buffer layout has changed. In V5, Unions don't have a validity + /// bitmap buffer. + MetadataVersionV5 MetadataVersion = 4 +) + +var EnumNamesMetadataVersion = map[MetadataVersion]string{ + MetadataVersionV1: "V1", + MetadataVersionV2: "V2", + MetadataVersionV3: "V3", + MetadataVersionV4: "V4", + MetadataVersionV5: "V5", +} + +var EnumValuesMetadataVersion = map[string]MetadataVersion{ + "V1": MetadataVersionV1, + "V2": MetadataVersionV2, + "V3": MetadataVersionV3, + "V4": MetadataVersionV4, + "V5": MetadataVersionV5, +} + +func (v MetadataVersion) String() string { + if s, ok := EnumNamesMetadataVersion[v]; ok { + return s + } + return "MetadataVersion(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Null.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Null.go new file mode 100644 index 00000000..3c3eb4bd --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Null.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// These are stored in the flatbuffer in the Type union below +type Null struct { + _tab flatbuffers.Table +} + +func GetRootAsNull(buf []byte, offset flatbuffers.UOffsetT) *Null { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Null{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Null) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Null) Table() flatbuffers.Table { + return rcv._tab +} + +func NullStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func NullEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Precision.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Precision.go new file mode 100644 index 00000000..d8021ccc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Precision.go @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +type Precision int16 + +const ( + PrecisionHALF Precision = 0 + PrecisionSINGLE Precision = 1 + PrecisionDOUBLE Precision = 2 +) + +var EnumNamesPrecision = map[Precision]string{ + PrecisionHALF: "HALF", + PrecisionSINGLE: "SINGLE", + PrecisionDOUBLE: "DOUBLE", +} + +var EnumValuesPrecision = map[string]Precision{ + "HALF": PrecisionHALF, + "SINGLE": PrecisionSINGLE, + "DOUBLE": PrecisionDOUBLE, +} + +func (v Precision) String() string { + if s, ok := EnumNamesPrecision[v]; ok { + return s + } + return "Precision(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go new file mode 100644 index 00000000..bb6aca9a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A data header describing the shared memory layout of a "record" or "row" +/// batch. Some systems call this a "row batch" internally and others a "record +/// batch". +type RecordBatch struct { + _tab flatbuffers.Table +} + +func GetRootAsRecordBatch(buf []byte, offset flatbuffers.UOffsetT) *RecordBatch { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RecordBatch{} + x.Init(buf, n+offset) + return x +} + +func (rcv *RecordBatch) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RecordBatch) Table() flatbuffers.Table { + return rcv._tab +} + +/// number of records / rows. The arrays in the batch should all have this +/// length +func (rcv *RecordBatch) Length() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// number of records / rows. The arrays in the batch should all have this +/// length +func (rcv *RecordBatch) MutateLength(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +/// Nodes correspond to the pre-ordered flattened logical schema +func (rcv *RecordBatch) Nodes(obj *FieldNode, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *RecordBatch) NodesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Nodes correspond to the pre-ordered flattened logical schema +/// Buffers correspond to the pre-ordered flattened buffer tree +/// +/// The number of buffers appended to this list depends on the schema. For +/// example, most primitive arrays will have 2 buffers, 1 for the validity +/// bitmap and 1 for the values. For struct arrays, there will only be a +/// single buffer for the validity (nulls) bitmap +func (rcv *RecordBatch) Buffers(obj *Buffer, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *RecordBatch) BuffersLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Buffers correspond to the pre-ordered flattened buffer tree +/// +/// The number of buffers appended to this list depends on the schema. For +/// example, most primitive arrays will have 2 buffers, 1 for the validity +/// bitmap and 1 for the values. 
For struct arrays, there will only be a +/// single buffer for the validity (nulls) bitmap +/// Optional compression of the message body +func (rcv *RecordBatch) Compression(obj *BodyCompression) *BodyCompression { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(BodyCompression) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// Optional compression of the message body +func RecordBatchStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func RecordBatchAddLength(builder *flatbuffers.Builder, length int64) { + builder.PrependInt64Slot(0, length, 0) +} +func RecordBatchAddNodes(builder *flatbuffers.Builder, nodes flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(nodes), 0) +} +func RecordBatchStartNodesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func RecordBatchAddBuffers(builder *flatbuffers.Builder, buffers flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(buffers), 0) +} +func RecordBatchStartBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func RecordBatchAddCompression(builder *flatbuffers.Builder, compression flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(compression), 0) +} +func RecordBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunEndEncoded.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunEndEncoded.go new file mode 100644 index 00000000..fa414c1b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunEndEncoded.go @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Contains two child arrays, run_ends and values. +/// The run_ends child array must be a 16/32/64-bit integer array +/// which encodes the indices at which the run with the value in +/// each corresponding index in the values child array ends. +/// Like list/struct types, the value array can be of any type. 
+type RunEndEncoded struct { + _tab flatbuffers.Table +} + +func GetRootAsRunEndEncoded(buf []byte, offset flatbuffers.UOffsetT) *RunEndEncoded { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RunEndEncoded{} + x.Init(buf, n+offset) + return x +} + +func (rcv *RunEndEncoded) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RunEndEncoded) Table() flatbuffers.Table { + return rcv._tab +} + +func RunEndEncodedStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func RunEndEncodedEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunLengthEncoded.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunLengthEncoded.go new file mode 100644 index 00000000..8822c066 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunLengthEncoded.go @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type RunLengthEncoded struct { + _tab flatbuffers.Table +} + +func GetRootAsRunLengthEncoded(buf []byte, offset flatbuffers.UOffsetT) *RunLengthEncoded { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &RunLengthEncoded{} + x.Init(buf, n+offset) + return x +} + +func (rcv *RunLengthEncoded) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *RunLengthEncoded) Table() flatbuffers.Table { + return rcv._tab +} + +func RunLengthEncodedStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func RunLengthEncodedEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Schema.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Schema.go new file mode 100644 index 00000000..4ee5ecc9 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Schema.go @@ -0,0 +1,159 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// A Schema describes the columns in a row batch +type Schema struct { + _tab flatbuffers.Table +} + +func GetRootAsSchema(buf []byte, offset flatbuffers.UOffsetT) *Schema { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Schema{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Schema) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Schema) Table() flatbuffers.Table { + return rcv._tab +} + +/// endianness of the buffer +/// it is Little Endian by default +/// if endianness doesn't match the underlying system then the vectors need to be converted +func (rcv *Schema) Endianness() Endianness { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return Endianness(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +/// endianness of the buffer +/// it is Little Endian by default +/// if endianness doesn't match the underlying system then the vectors need to be converted +func (rcv *Schema) MutateEndianness(n Endianness) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func (rcv *Schema) Fields(obj *Field, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Schema) FieldsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Schema) CustomMetadata(obj *KeyValue, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Schema) CustomMetadataLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Features used in the stream/file. +func (rcv *Schema) Features(j int) Feature { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return Feature(rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8))) + } + return 0 +} + +func (rcv *Schema) FeaturesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Features used in the stream/file. 
+func (rcv *Schema) MutateFeatures(j int, n Feature) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), int64(n)) + } + return false +} + +func SchemaStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func SchemaAddEndianness(builder *flatbuffers.Builder, endianness Endianness) { + builder.PrependInt16Slot(0, int16(endianness), 0) +} +func SchemaAddFields(builder *flatbuffers.Builder, fields flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(fields), 0) +} +func SchemaStartFieldsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SchemaAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(customMetadata), 0) +} +func SchemaStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SchemaAddFeatures(builder *flatbuffers.Builder, features flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(features), 0) +} +func SchemaStartFeaturesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func SchemaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go new file mode 100644 index 00000000..2d86fdef --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import "strconv" + +type SparseMatrixCompressedAxis int16 + +const ( + SparseMatrixCompressedAxisRow SparseMatrixCompressedAxis = 0 + SparseMatrixCompressedAxisColumn SparseMatrixCompressedAxis = 1 +) + +var EnumNamesSparseMatrixCompressedAxis = map[SparseMatrixCompressedAxis]string{ + SparseMatrixCompressedAxisRow: "Row", + SparseMatrixCompressedAxisColumn: "Column", +} + +var EnumValuesSparseMatrixCompressedAxis = map[string]SparseMatrixCompressedAxis{ + "Row": SparseMatrixCompressedAxisRow, + "Column": SparseMatrixCompressedAxisColumn, +} + +func (v SparseMatrixCompressedAxis) String() string { + if s, ok := EnumNamesSparseMatrixCompressedAxis[v]; ok { + return s + } + return "SparseMatrixCompressedAxis(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSR.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSR.go new file mode 100644 index 00000000..de821765 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSR.go @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Compressed Sparse Row format, that is matrix-specific. +type SparseMatrixIndexCSR struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseMatrixIndexCSR(buf []byte, offset flatbuffers.UOffsetT) *SparseMatrixIndexCSR { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseMatrixIndexCSR{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseMatrixIndexCSR) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseMatrixIndexCSR) Table() flatbuffers.Table { + return rcv._tab +} + +/// The type of values in indptrBuffer +func (rcv *SparseMatrixIndexCSR) IndptrType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indptrBuffer +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from indptr[i] to indptr[i+1] in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// +/// The array of non-zero values in X is: +/// +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. 
+/// +/// And the indptr of X is: +/// +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from indptr[i] to indptr[i+1] in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// +/// The array of non-zero values in X is: +/// +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +/// +/// And the indptr of X is: +/// +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +/// The type of values in indicesBuffer +func (rcv *SparseMatrixIndexCSR) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffer +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. +/// +/// For example, the indices of the above X is: +/// +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// +/// Note that the indices are sorted in lexicographical order for each row. +func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. +/// +/// For example, the indices of the above X is: +/// +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// +/// Note that the indices are sorted in lexicographical order for each row. 
+func SparseMatrixIndexCSRStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func SparseMatrixIndexCSRAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indptrType), 0) +} +func SparseMatrixIndexCSRAddIndptrBuffer(builder *flatbuffers.Builder, indptrBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(1, flatbuffers.UOffsetT(indptrBuffer), 0) +} +func SparseMatrixIndexCSRAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseMatrixIndexCSRAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(3, flatbuffers.UOffsetT(indicesBuffer), 0) +} +func SparseMatrixIndexCSREnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSX.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSX.go new file mode 100644 index 00000000..c28cc5d0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSX.go @@ -0,0 +1,200 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Compressed Sparse format, that is matrix-specific. 
+type SparseMatrixIndexCSX struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseMatrixIndexCSX(buf []byte, offset flatbuffers.UOffsetT) *SparseMatrixIndexCSX { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseMatrixIndexCSX{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseMatrixIndexCSX) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseMatrixIndexCSX) Table() flatbuffers.Table { + return rcv._tab +} + +/// Which axis, row or column, is compressed +func (rcv *SparseMatrixIndexCSX) CompressedAxis() SparseMatrixCompressedAxis { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return SparseMatrixCompressedAxis(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +/// Which axis, row or column, is compressed +func (rcv *SparseMatrixIndexCSX) MutateCompressedAxis(n SparseMatrixCompressedAxis) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +/// The type of values in indptrBuffer +func (rcv *SparseMatrixIndexCSX) IndptrType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indptrBuffer +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// ```text +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// ``` +/// The array of non-zero values in X is: +/// ```text +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +/// ``` +/// And the indptr of X is: +/// ```text +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +/// ``` +func (rcv *SparseMatrixIndexCSX) IndptrBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indptrBuffer stores the location and size of indptr array that +/// represents the range of the rows. +/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. +/// The length of this array is 1 + (the number of rows), and the type +/// of index value is long. +/// +/// For example, let X be the following 6x4 matrix: +/// ```text +/// X := [[0, 1, 2, 0], +/// [0, 0, 3, 0], +/// [0, 4, 0, 5], +/// [0, 0, 0, 0], +/// [6, 0, 7, 8], +/// [0, 9, 0, 0]]. +/// ``` +/// The array of non-zero values in X is: +/// ```text +/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +/// ``` +/// And the indptr of X is: +/// ```text +/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +/// ``` +/// The type of values in indicesBuffer +func (rcv *SparseMatrixIndexCSX) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffer +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. 
+/// +/// For example, the indices of the above X is: +/// ```text +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// ``` +/// Note that the indices are sorted in lexicographical order for each row. +func (rcv *SparseMatrixIndexCSX) IndicesBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// indicesBuffer stores the location and size of the array that +/// contains the column indices of the corresponding non-zero values. +/// The type of index value is long. +/// +/// For example, the indices of the above X is: +/// ```text +/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +/// ``` +/// Note that the indices are sorted in lexicographical order for each row. +func SparseMatrixIndexCSXStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func SparseMatrixIndexCSXAddCompressedAxis(builder *flatbuffers.Builder, compressedAxis SparseMatrixCompressedAxis) { + builder.PrependInt16Slot(0, int16(compressedAxis), 0) +} +func SparseMatrixIndexCSXAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indptrType), 0) +} +func SparseMatrixIndexCSXAddIndptrBuffer(builder *flatbuffers.Builder, indptrBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(2, flatbuffers.UOffsetT(indptrBuffer), 0) +} +func SparseMatrixIndexCSXAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseMatrixIndexCSXAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(4, flatbuffers.UOffsetT(indicesBuffer), 0) +} +func SparseMatrixIndexCSXEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensor.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensor.go new file mode 100644 index 00000000..6f3f5579 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensor.go @@ -0,0 +1,175 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type SparseTensor struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseTensor(buf []byte, offset flatbuffers.UOffsetT) *SparseTensor { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseTensor{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseTensor) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseTensor) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *SparseTensor) TypeType() Type { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return Type(rcv._tab.GetByte(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *SparseTensor) MutateTypeType(n Type) bool { + return rcv._tab.MutateByteSlot(4, byte(n)) +} + +/// The type of data contained in a value cell. +/// Currently only fixed-width value types are supported, +/// no strings or nested types. +func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// The type of data contained in a value cell. +/// Currently only fixed-width value types are supported, +/// no strings or nested types. +/// The dimensions of the tensor, optionally named. +func (rcv *SparseTensor) Shape(obj *TensorDim, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SparseTensor) ShapeLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// The dimensions of the tensor, optionally named. +/// The number of non-zero values in a sparse tensor. +func (rcv *SparseTensor) NonZeroLength() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// The number of non-zero values in a sparse tensor. 
+func (rcv *SparseTensor) MutateNonZeroLength(n int64) bool { + return rcv._tab.MutateInt64Slot(10, n) +} + +func (rcv *SparseTensor) SparseIndexType() SparseTensorIndex { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return SparseTensorIndex(rcv._tab.GetByte(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *SparseTensor) MutateSparseIndexType(n SparseTensorIndex) bool { + return rcv._tab.MutateByteSlot(12, byte(n)) +} + +/// Sparse tensor index +func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// Sparse tensor index +/// The location and size of the tensor's data +func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the tensor's data +func SparseTensorStart(builder *flatbuffers.Builder) { + builder.StartObject(7) +} +func SparseTensorAddTypeType(builder *flatbuffers.Builder, typeType Type) { + builder.PrependByteSlot(0, byte(typeType), 0) +} +func SparseTensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0) +} +func SparseTensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0) +} +func SparseTensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SparseTensorAddNonZeroLength(builder *flatbuffers.Builder, nonZeroLength int64) { + builder.PrependInt64Slot(3, nonZeroLength, 0) +} +func SparseTensorAddSparseIndexType(builder *flatbuffers.Builder, sparseIndexType SparseTensorIndex) { + builder.PrependByteSlot(4, byte(sparseIndexType), 0) +} +func SparseTensorAddSparseIndex(builder *flatbuffers.Builder, sparseIndex flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(sparseIndex), 0) +} +func SparseTensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { + builder.PrependStructSlot(6, flatbuffers.UOffsetT(data), 0) +} +func SparseTensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndex.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndex.go new file mode 100644 index 00000000..42aa818b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndex.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +type SparseTensorIndex byte + +const ( + SparseTensorIndexNONE SparseTensorIndex = 0 + SparseTensorIndexSparseTensorIndexCOO SparseTensorIndex = 1 + SparseTensorIndexSparseMatrixIndexCSX SparseTensorIndex = 2 + SparseTensorIndexSparseTensorIndexCSF SparseTensorIndex = 3 +) + +var EnumNamesSparseTensorIndex = map[SparseTensorIndex]string{ + SparseTensorIndexNONE: "NONE", + SparseTensorIndexSparseTensorIndexCOO: "SparseTensorIndexCOO", + SparseTensorIndexSparseMatrixIndexCSX: "SparseMatrixIndexCSX", + SparseTensorIndexSparseTensorIndexCSF: "SparseTensorIndexCSF", +} + +var EnumValuesSparseTensorIndex = map[string]SparseTensorIndex{ + "NONE": SparseTensorIndexNONE, + "SparseTensorIndexCOO": SparseTensorIndexSparseTensorIndexCOO, + "SparseMatrixIndexCSX": SparseTensorIndexSparseMatrixIndexCSX, + "SparseTensorIndexCSF": SparseTensorIndexSparseTensorIndexCSF, +} + +func (v SparseTensorIndex) String() string { + if s, ok := EnumNamesSparseTensorIndex[v]; ok { + return s + } + return "SparseTensorIndex(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCOO.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCOO.go new file mode 100644 index 00000000..f8eee99f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCOO.go @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// EXPERIMENTAL: Data structures for sparse tensors +/// Coordinate (COO) format of sparse tensor index. +/// +/// COO's index list are represented as a NxM matrix, +/// where N is the number of non-zero values, +/// and M is the number of dimensions of a sparse tensor. +/// +/// indicesBuffer stores the location and size of the data of this indices +/// matrix. The value type and the stride of the indices matrix is +/// specified in indicesType and indicesStrides fields. 
+/// +/// For example, let X be a 2x3x4x5 tensor, and it has the following +/// 6 non-zero values: +/// ```text +/// X[0, 1, 2, 0] := 1 +/// X[1, 1, 2, 3] := 2 +/// X[0, 2, 1, 0] := 3 +/// X[0, 1, 3, 0] := 4 +/// X[0, 1, 2, 1] := 5 +/// X[1, 2, 0, 4] := 6 +/// ``` +/// In COO format, the index matrix of X is the following 4x6 matrix: +/// ```text +/// [[0, 0, 0, 0, 1, 1], +/// [1, 1, 1, 2, 1, 2], +/// [2, 2, 3, 1, 2, 0], +/// [0, 1, 0, 0, 3, 4]] +/// ``` +/// When isCanonical is true, the indices is sorted in lexicographical order +/// (row-major order), and it does not have duplicated entries. Otherwise, +/// the indices may not be sorted, or may have duplicated entries. +type SparseTensorIndexCOO struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseTensorIndexCOO(buf []byte, offset flatbuffers.UOffsetT) *SparseTensorIndexCOO { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseTensorIndexCOO{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseTensorIndexCOO) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseTensorIndexCOO) Table() flatbuffers.Table { + return rcv._tab +} + +/// The type of values in indicesBuffer +func (rcv *SparseTensorIndexCOO) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffer +/// Non-negative byte offsets to advance one value cell along each dimension +/// If omitted, default to row-major order (C-like). +func (rcv *SparseTensorIndexCOO) IndicesStrides(j int) int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *SparseTensorIndexCOO) IndicesStridesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Non-negative byte offsets to advance one value cell along each dimension +/// If omitted, default to row-major order (C-like). +func (rcv *SparseTensorIndexCOO) MutateIndicesStrides(j int, n int64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + +/// The location and size of the indices matrix's data +func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the indices matrix's data +/// This flag is true if and only if the indices matrix is sorted in +/// row-major order, and does not have duplicated entries. +/// This sort order is the same as of Tensorflow's SparseTensor, +/// but it is inverse order of SciPy's canonical coo_matrix +/// (SciPy employs column-major order for its coo_matrix). +func (rcv *SparseTensorIndexCOO) IsCanonical() bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +/// This flag is true if and only if the indices matrix is sorted in +/// row-major order, and does not have duplicated entries. 
+/// This sort order is the same as of Tensorflow's SparseTensor, +/// but it is inverse order of SciPy's canonical coo_matrix +/// (SciPy employs column-major order for its coo_matrix). +func (rcv *SparseTensorIndexCOO) MutateIsCanonical(n bool) bool { + return rcv._tab.MutateBoolSlot(10, n) +} + +func SparseTensorIndexCOOStart(builder *flatbuffers.Builder) { + builder.StartObject(4) +} +func SparseTensorIndexCOOAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseTensorIndexCOOAddIndicesStrides(builder *flatbuffers.Builder, indicesStrides flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indicesStrides), 0) +} +func SparseTensorIndexCOOStartIndicesStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func SparseTensorIndexCOOAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) { + builder.PrependStructSlot(2, flatbuffers.UOffsetT(indicesBuffer), 0) +} +func SparseTensorIndexCOOAddIsCanonical(builder *flatbuffers.Builder, isCanonical bool) { + builder.PrependBoolSlot(3, isCanonical, false) +} +func SparseTensorIndexCOOEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCSF.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCSF.go new file mode 100644 index 00000000..a824c84e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCSF.go @@ -0,0 +1,291 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Compressed Sparse Fiber (CSF) sparse tensor index. +type SparseTensorIndexCSF struct { + _tab flatbuffers.Table +} + +func GetRootAsSparseTensorIndexCSF(buf []byte, offset flatbuffers.UOffsetT) *SparseTensorIndexCSF { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &SparseTensorIndexCSF{} + x.Init(buf, n+offset) + return x +} + +func (rcv *SparseTensorIndexCSF) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *SparseTensorIndexCSF) Table() flatbuffers.Table { + return rcv._tab +} + +/// CSF is a generalization of compressed sparse row (CSR) index. +/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) +/// +/// CSF index recursively compresses each dimension of a tensor into a set +/// of prefix trees. Each path from a root to leaf forms one tensor +/// non-zero index. 
CSF is implemented with two arrays of buffers and one +/// arrays of integers. +/// +/// For example, let X be a 2x3x4x5 tensor and let it have the following +/// 8 non-zero values: +/// ```text +/// X[0, 0, 0, 1] := 1 +/// X[0, 0, 0, 2] := 2 +/// X[0, 1, 0, 0] := 3 +/// X[0, 1, 0, 2] := 4 +/// X[0, 1, 1, 0] := 5 +/// X[1, 1, 1, 0] := 6 +/// X[1, 1, 1, 1] := 7 +/// X[1, 1, 1, 2] := 8 +/// ``` +/// As a prefix tree this would be represented as: +/// ```text +/// 0 1 +/// / \ | +/// 0 1 1 +/// / / \ | +/// 0 0 1 1 +/// /| /| | /| | +/// 1 2 0 2 0 0 1 2 +/// ``` +/// The type of values in indptrBuffers +func (rcv *SparseTensorIndexCSF) IndptrType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// CSF is a generalization of compressed sparse row (CSR) index. +/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) +/// +/// CSF index recursively compresses each dimension of a tensor into a set +/// of prefix trees. Each path from a root to leaf forms one tensor +/// non-zero index. CSF is implemented with two arrays of buffers and one +/// arrays of integers. +/// +/// For example, let X be a 2x3x4x5 tensor and let it have the following +/// 8 non-zero values: +/// ```text +/// X[0, 0, 0, 1] := 1 +/// X[0, 0, 0, 2] := 2 +/// X[0, 1, 0, 0] := 3 +/// X[0, 1, 0, 2] := 4 +/// X[0, 1, 1, 0] := 5 +/// X[1, 1, 1, 0] := 6 +/// X[1, 1, 1, 1] := 7 +/// X[1, 1, 1, 2] := 8 +/// ``` +/// As a prefix tree this would be represented as: +/// ```text +/// 0 1 +/// / \ | +/// 0 1 1 +/// / / \ | +/// 0 0 1 1 +/// /| /| | /| | +/// 1 2 0 2 0 0 1 2 +/// ``` +/// The type of values in indptrBuffers +/// indptrBuffers stores the sparsity structure. +/// Each two consecutive dimensions in a tensor correspond to a buffer in +/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` +/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in +/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. +/// +/// For example, the indptrBuffers for the above X is: +/// ```text +/// indptrBuffer(X) = [ +/// [0, 2, 3], +/// [0, 1, 3, 4], +/// [0, 2, 4, 5, 8] +/// ]. +/// ``` +func (rcv *SparseTensorIndexCSF) IndptrBuffers(obj *Buffer, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SparseTensorIndexCSF) IndptrBuffersLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// indptrBuffers stores the sparsity structure. +/// Each two consecutive dimensions in a tensor correspond to a buffer in +/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` +/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in +/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. +/// +/// For example, the indptrBuffers for the above X is: +/// ```text +/// indptrBuffer(X) = [ +/// [0, 2, 3], +/// [0, 1, 3, 4], +/// [0, 2, 4, 5, 8] +/// ]. 
+/// ``` +/// The type of values in indicesBuffers +func (rcv *SparseTensorIndexCSF) IndicesType(obj *Int) *Int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Indirect(o + rcv._tab.Pos) + if obj == nil { + obj = new(Int) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The type of values in indicesBuffers +/// indicesBuffers stores values of nodes. +/// Each tensor dimension corresponds to a buffer in indicesBuffers. +/// For example, the indicesBuffers for the above X is: +/// ```text +/// indicesBuffer(X) = [ +/// [0, 1], +/// [0, 1, 1], +/// [0, 0, 1, 1], +/// [1, 2, 0, 2, 0, 0, 1, 2] +/// ]. +/// ``` +func (rcv *SparseTensorIndexCSF) IndicesBuffers(obj *Buffer, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 16 + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *SparseTensorIndexCSF) IndicesBuffersLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// indicesBuffers stores values of nodes. +/// Each tensor dimension corresponds to a buffer in indicesBuffers. +/// For example, the indicesBuffers for the above X is: +/// ```text +/// indicesBuffer(X) = [ +/// [0, 1], +/// [0, 1, 1], +/// [0, 0, 1, 1], +/// [1, 2, 0, 2, 0, 0, 1, 2] +/// ]. +/// ``` +/// axisOrder stores the sequence in which dimensions were traversed to +/// produce the prefix tree. +/// For example, the axisOrder for the above X is: +/// ```text +/// axisOrder(X) = [0, 1, 2, 3]. +/// ``` +func (rcv *SparseTensorIndexCSF) AxisOrder(j int) int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) + } + return 0 +} + +func (rcv *SparseTensorIndexCSF) AxisOrderLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// axisOrder stores the sequence in which dimensions were traversed to +/// produce the prefix tree. +/// For example, the axisOrder for the above X is: +/// ```text +/// axisOrder(X) = [0, 1, 2, 3]. 
+/// ``` +func (rcv *SparseTensorIndexCSF) MutateAxisOrder(j int, n int32) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n) + } + return false +} + +func SparseTensorIndexCSFStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func SparseTensorIndexCSFAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indptrType), 0) +} +func SparseTensorIndexCSFAddIndptrBuffers(builder *flatbuffers.Builder, indptrBuffers flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indptrBuffers), 0) +} +func SparseTensorIndexCSFStartIndptrBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func SparseTensorIndexCSFAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(indicesType), 0) +} +func SparseTensorIndexCSFAddIndicesBuffers(builder *flatbuffers.Builder, indicesBuffers flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(indicesBuffers), 0) +} +func SparseTensorIndexCSFStartIndicesBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(16, numElems, 8) +} +func SparseTensorIndexCSFAddAxisOrder(builder *flatbuffers.Builder, axisOrder flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(axisOrder), 0) +} +func SparseTensorIndexCSFStartAxisOrderVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func SparseTensorIndexCSFEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Struct_.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Struct_.go new file mode 100644 index 00000000..427e7060 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Struct_.go @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct +/// (according to the physical memory layout). 
We used Struct_ here as +/// Struct is a reserved word in Flatbuffers +type Struct_ struct { + _tab flatbuffers.Table +} + +func GetRootAsStruct_(buf []byte, offset flatbuffers.UOffsetT) *Struct_ { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Struct_{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Struct_) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Struct_) Table() flatbuffers.Table { + return rcv._tab +} + +func Struct_Start(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func Struct_End(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Tensor.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Tensor.go new file mode 100644 index 00000000..39d70e35 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Tensor.go @@ -0,0 +1,163 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +type Tensor struct { + _tab flatbuffers.Table +} + +func GetRootAsTensor(buf []byte, offset flatbuffers.UOffsetT) *Tensor { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Tensor{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Tensor) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Tensor) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Tensor) TypeType() Type { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return Type(rcv._tab.GetByte(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Tensor) MutateTypeType(n Type) bool { + return rcv._tab.MutateByteSlot(4, byte(n)) +} + +/// The type of data contained in a value cell. Currently only fixed-width +/// value types are supported, no strings or nested types +func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + rcv._tab.Union(obj, o) + return true + } + return false +} + +/// The type of data contained in a value cell. 
Currently only fixed-width +/// value types are supported, no strings or nested types +/// The dimensions of the tensor, optionally named +func (rcv *Tensor) Shape(obj *TensorDim, j int) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + x := rcv._tab.Vector(o) + x += flatbuffers.UOffsetT(j) * 4 + x = rcv._tab.Indirect(x) + obj.Init(rcv._tab.Bytes, x) + return true + } + return false +} + +func (rcv *Tensor) ShapeLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// The dimensions of the tensor, optionally named +/// Non-negative byte offsets to advance one value cell along each dimension +/// If omitted, default to row-major order (C-like). +func (rcv *Tensor) Strides(j int) int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *Tensor) StridesLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Non-negative byte offsets to advance one value cell along each dimension +/// If omitted, default to row-major order (C-like). +func (rcv *Tensor) MutateStrides(j int, n int64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + +/// The location and size of the tensor's data +func (rcv *Tensor) Data(obj *Buffer) *Buffer { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + x := o + rcv._tab.Pos + if obj == nil { + obj = new(Buffer) + } + obj.Init(rcv._tab.Bytes, x) + return obj + } + return nil +} + +/// The location and size of the tensor's data +func TensorStart(builder *flatbuffers.Builder) { + builder.StartObject(5) +} +func TensorAddTypeType(builder *flatbuffers.Builder, typeType Type) { + builder.PrependByteSlot(0, byte(typeType), 0) +} +func TensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0) +} +func TensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0) +} +func TensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func TensorAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(strides), 0) +} +func TensorStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} +func TensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { + builder.PrependStructSlot(4, flatbuffers.UOffsetT(data), 0) +} +func TensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TensorDim.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TensorDim.go new file mode 100644 index 00000000..14b82120 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TensorDim.go @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// ---------------------------------------------------------------------- +/// Data structures for dense tensors +/// Shape data for a single axis in a tensor +type TensorDim struct { + _tab flatbuffers.Table +} + +func GetRootAsTensorDim(buf []byte, offset flatbuffers.UOffsetT) *TensorDim { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &TensorDim{} + x.Init(buf, n+offset) + return x +} + +func (rcv *TensorDim) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *TensorDim) Table() flatbuffers.Table { + return rcv._tab +} + +/// Length of dimension +func (rcv *TensorDim) Size() int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return rcv._tab.GetInt64(o + rcv._tab.Pos) + } + return 0 +} + +/// Length of dimension +func (rcv *TensorDim) MutateSize(n int64) bool { + return rcv._tab.MutateInt64Slot(4, n) +} + +/// Name of the dimension, optional +func (rcv *TensorDim) Name() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +/// Name of the dimension, optional +func TensorDimStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TensorDimAddSize(builder *flatbuffers.Builder, size int64) { + builder.PrependInt64Slot(0, size, 0) +} +func TensorDimAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(name), 0) +} +func TensorDimEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Time.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Time.go new file mode 100644 index 00000000..2fb6e4c1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Time.go @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
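+
+// A minimal sketch of the bottom-up builder pattern used by the generated
+// tables above, shown for TensorDim (the size and name are made-up values;
+// the returned offset would be referenced from a parent Tensor table, and
+// strings must be created before the table is started):
+//
+//	b := flatbuffers.NewBuilder(0)
+//	name := b.CreateString("batch")
+//	flatbuf.TensorDimStart(b)
+//	flatbuf.TensorDimAddSize(b, 32)
+//	flatbuf.TensorDimAddName(b, name)
+//	dim := flatbuf.TensorDimEnd(b)
+//	_ = dim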
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Time is either a 32-bit or 64-bit signed integer type representing an +/// elapsed time since midnight, stored in either of four units: seconds, +/// milliseconds, microseconds or nanoseconds. +/// +/// The integer `bitWidth` depends on the `unit` and must be one of the following: +/// * SECOND and MILLISECOND: 32 bits +/// * MICROSECOND and NANOSECOND: 64 bits +/// +/// The allowed values are between 0 (inclusive) and 86400 (=24*60*60) seconds +/// (exclusive), adjusted for the time unit (for example, up to 86400000 +/// exclusive for the MILLISECOND unit). +/// This definition doesn't allow for leap seconds. Time values from +/// measurements with leap seconds will need to be corrected when ingesting +/// into Arrow (for example by replacing the value 86400 with 86399). +type Time struct { + _tab flatbuffers.Table +} + +func GetRootAsTime(buf []byte, offset flatbuffers.UOffsetT) *Time { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Time{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Time) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Time) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Time) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 1 +} + +func (rcv *Time) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func (rcv *Time) BitWidth() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.GetInt32(o + rcv._tab.Pos) + } + return 32 +} + +func (rcv *Time) MutateBitWidth(n int32) bool { + return rcv._tab.MutateInt32Slot(6, n) +} + +func TimeStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TimeAddUnit(builder *flatbuffers.Builder, unit TimeUnit) { + builder.PrependInt16Slot(0, int16(unit), 1) +} +func TimeAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) { + builder.PrependInt32Slot(1, bitWidth, 32) +} +func TimeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TimeUnit.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TimeUnit.go new file mode 100644 index 00000000..df14ece4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TimeUnit.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
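+
+// A minimal sketch of validating the unit/bitWidth pairing documented on the
+// Time table above (an illustrative helper, not part of the generated API):
+//
+//	func validTimeBitWidth(t *flatbuf.Time) bool {
+//		switch t.Unit() {
+//		case flatbuf.TimeUnitSECOND, flatbuf.TimeUnitMILLISECOND:
+//			return t.BitWidth() == 32
+//		default: // MICROSECOND and NANOSECOND require 64 bits
+//			return t.BitWidth() == 64
+//		}
+//	}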
+ +package flatbuf + +import "strconv" + +type TimeUnit int16 + +const ( + TimeUnitSECOND TimeUnit = 0 + TimeUnitMILLISECOND TimeUnit = 1 + TimeUnitMICROSECOND TimeUnit = 2 + TimeUnitNANOSECOND TimeUnit = 3 +) + +var EnumNamesTimeUnit = map[TimeUnit]string{ + TimeUnitSECOND: "SECOND", + TimeUnitMILLISECOND: "MILLISECOND", + TimeUnitMICROSECOND: "MICROSECOND", + TimeUnitNANOSECOND: "NANOSECOND", +} + +var EnumValuesTimeUnit = map[string]TimeUnit{ + "SECOND": TimeUnitSECOND, + "MILLISECOND": TimeUnitMILLISECOND, + "MICROSECOND": TimeUnitMICROSECOND, + "NANOSECOND": TimeUnitNANOSECOND, +} + +func (v TimeUnit) String() string { + if s, ok := EnumNamesTimeUnit[v]; ok { + return s + } + return "TimeUnit(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Timestamp.go new file mode 100644 index 00000000..f5321145 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Timestamp.go @@ -0,0 +1,201 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Timestamp is a 64-bit signed integer representing an elapsed time since a +/// fixed epoch, stored in either of four units: seconds, milliseconds, +/// microseconds or nanoseconds, and is optionally annotated with a timezone. +/// +/// Timestamp values do not include any leap seconds (in other words, all +/// days are considered 86400 seconds long). +/// +/// Timestamps with a non-empty timezone +/// ------------------------------------ +/// +/// If a Timestamp column has a non-empty timezone value, its epoch is +/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in the *UTC* timezone +/// (the Unix epoch), regardless of the Timestamp's own timezone. +/// +/// Therefore, timestamp values with a non-empty timezone correspond to +/// physical points in time together with some additional information about +/// how the data was obtained and/or how to display it (the timezone). +/// +/// For example, the timestamp value 0 with the timezone string "Europe/Paris" +/// corresponds to "January 1st 1970, 00h00" in the UTC timezone, but the +/// application may prefer to display it as "January 1st 1970, 01h00" in +/// the Europe/Paris timezone (which is the same physical point in time). +/// +/// One consequence is that timestamp values with a non-empty timezone +/// can be compared and ordered directly, since they all share the same +/// well-known point of reference (the Unix epoch). 
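+///
+/// A short sketch of this rule using Go's time package (the timezone is an
+/// arbitrary example):
+/// ```text
+/// loc, _ := time.LoadLocation("Europe/Paris")
+/// t := time.Unix(0, 0).In(loc) // 1970-01-01 01:00:00 +0100 CET
+/// ```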
+/// +/// Timestamps with an unset / empty timezone +/// ----------------------------------------- +/// +/// If a Timestamp column has no timezone value, its epoch is +/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in an *unknown* timezone. +/// +/// Therefore, timestamp values without a timezone cannot be meaningfully +/// interpreted as physical points in time, but only as calendar / clock +/// indications ("wall clock time") in an unspecified timezone. +/// +/// For example, the timestamp value 0 with an empty timezone string +/// corresponds to "January 1st 1970, 00h00" in an unknown timezone: there +/// is not enough information to interpret it as a well-defined physical +/// point in time. +/// +/// One consequence is that timestamp values without a timezone cannot +/// be reliably compared or ordered, since they may have different points of +/// reference. In particular, it is *not* possible to interpret an unset +/// or empty timezone as the same as "UTC". +/// +/// Conversion between timezones +/// ---------------------------- +/// +/// If a Timestamp column has a non-empty timezone, changing the timezone +/// to a different non-empty value is a metadata-only operation: +/// the timestamp values need not change as their point of reference remains +/// the same (the Unix epoch). +/// +/// However, if a Timestamp column has no timezone value, changing it to a +/// non-empty value requires to think about the desired semantics. +/// One possibility is to assume that the original timestamp values are +/// relative to the epoch of the timezone being set; timestamp values should +/// then adjusted to the Unix epoch (for example, changing the timezone from +/// empty to "Europe/Paris" would require converting the timestamp values +/// from "Europe/Paris" to "UTC", which seems counter-intuitive but is +/// nevertheless correct). +/// +/// Guidelines for encoding data from external libraries +/// ---------------------------------------------------- +/// +/// Date & time libraries often have multiple different data types for temporal +/// data. In order to ease interoperability between different implementations the +/// Arrow project has some recommendations for encoding these types into a Timestamp +/// column. +/// +/// An "instant" represents a physical point in time that has no relevant timezone +/// (for example, astronomical data). To encode an instant, use a Timestamp with +/// the timezone string set to "UTC", and make sure the Timestamp values +/// are relative to the UTC epoch (January 1st 1970, midnight). +/// +/// A "zoned date-time" represents a physical point in time annotated with an +/// informative timezone (for example, the timezone in which the data was +/// recorded). To encode a zoned date-time, use a Timestamp with the timezone +/// string set to the name of the timezone, and make sure the Timestamp values +/// are relative to the UTC epoch (January 1st 1970, midnight). +/// +/// (There is some ambiguity between an instant and a zoned date-time with the +/// UTC timezone. Both of these are stored the same in Arrow. Typically, +/// this distinction does not matter. If it does, then an application should +/// use custom metadata or an extension type to distinguish between the two cases.) +/// +/// An "offset date-time" represents a physical point in time combined with an +/// explicit offset from UTC. To encode an offset date-time, use a Timestamp +/// with the timezone string set to the numeric timezone offset string +/// (e.g. 
"+03:00"), and make sure the Timestamp values are relative to +/// the UTC epoch (January 1st 1970, midnight). +/// +/// A "naive date-time" (also called "local date-time" in some libraries) +/// represents a wall clock time combined with a calendar date, but with +/// no indication of how to map this information to a physical point in time. +/// Naive date-times must be handled with care because of this missing +/// information, and also because daylight saving time (DST) may make +/// some values ambiguous or non-existent. A naive date-time may be +/// stored as a struct with Date and Time fields. However, it may also be +/// encoded into a Timestamp column with an empty timezone. The timestamp +/// values should be computed "as if" the timezone of the date-time values +/// was UTC; for example, the naive date-time "January 1st 1970, 00h00" would +/// be encoded as timestamp value 0. +type Timestamp struct { + _tab flatbuffers.Table +} + +func GetRootAsTimestamp(buf []byte, offset flatbuffers.UOffsetT) *Timestamp { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Timestamp{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Timestamp) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Timestamp) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Timestamp) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Timestamp) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +/// The timezone is an optional string indicating the name of a timezone, +/// one of: +/// +/// * As used in the Olson timezone database (the "tz database" or +/// "tzdata"), such as "America/New_York". +/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", +/// such as "+07:30". +/// +/// Whether a timezone string is present indicates different semantics about +/// the data (see above). +func (rcv *Timestamp) Timezone() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +/// The timezone is an optional string indicating the name of a timezone, +/// one of: +/// +/// * As used in the Olson timezone database (the "tz database" or +/// "tzdata"), such as "America/New_York". +/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", +/// such as "+07:30". +/// +/// Whether a timezone string is present indicates different semantics about +/// the data (see above). +func TimestampStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TimestampAddUnit(builder *flatbuffers.Builder, unit TimeUnit) { + builder.PrependInt16Slot(0, int16(unit), 0) +} +func TimestampAddTimezone(builder *flatbuffers.Builder, timezone flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(timezone), 0) +} +func TimestampEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go new file mode 100644 index 00000000..707444b8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +/// ---------------------------------------------------------------------- +/// Top-level Type value, enabling extensible type-specific metadata. We can +/// add new logical types to Type without breaking backwards compatibility +type Type byte + +const ( + TypeNONE Type = 0 + TypeNull Type = 1 + TypeInt Type = 2 + TypeFloatingPoint Type = 3 + TypeBinary Type = 4 + TypeUtf8 Type = 5 + TypeBool Type = 6 + TypeDecimal Type = 7 + TypeDate Type = 8 + TypeTime Type = 9 + TypeTimestamp Type = 10 + TypeInterval Type = 11 + TypeList Type = 12 + TypeStruct_ Type = 13 + TypeUnion Type = 14 + TypeFixedSizeBinary Type = 15 + TypeFixedSizeList Type = 16 + TypeMap Type = 17 + TypeDuration Type = 18 + TypeLargeBinary Type = 19 + TypeLargeUtf8 Type = 20 + TypeLargeList Type = 21 + TypeRunEndEncoded Type = 22 +) + +var EnumNamesType = map[Type]string{ + TypeNONE: "NONE", + TypeNull: "Null", + TypeInt: "Int", + TypeFloatingPoint: "FloatingPoint", + TypeBinary: "Binary", + TypeUtf8: "Utf8", + TypeBool: "Bool", + TypeDecimal: "Decimal", + TypeDate: "Date", + TypeTime: "Time", + TypeTimestamp: "Timestamp", + TypeInterval: "Interval", + TypeList: "List", + TypeStruct_: "Struct_", + TypeUnion: "Union", + TypeFixedSizeBinary: "FixedSizeBinary", + TypeFixedSizeList: "FixedSizeList", + TypeMap: "Map", + TypeDuration: "Duration", + TypeLargeBinary: "LargeBinary", + TypeLargeUtf8: "LargeUtf8", + TypeLargeList: "LargeList", + TypeRunEndEncoded: "RunEndEncoded", +} + +var EnumValuesType = map[string]Type{ + "NONE": TypeNONE, + "Null": TypeNull, + "Int": TypeInt, + "FloatingPoint": TypeFloatingPoint, + "Binary": TypeBinary, + "Utf8": TypeUtf8, + "Bool": TypeBool, + "Decimal": TypeDecimal, + "Date": TypeDate, + "Time": TypeTime, + "Timestamp": TypeTimestamp, + "Interval": TypeInterval, + "List": TypeList, + "Struct_": TypeStruct_, + "Union": TypeUnion, + "FixedSizeBinary": TypeFixedSizeBinary, + "FixedSizeList": TypeFixedSizeList, + "Map": TypeMap, + "Duration": TypeDuration, + "LargeBinary": TypeLargeBinary, + "LargeUtf8": TypeLargeUtf8, + "LargeList": TypeLargeList, + "RunEndEncoded": TypeRunEndEncoded, +} + +func (v Type) String() string { + if s, ok := EnumNamesType[v]; ok { + return s + } + return "Type(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Union.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Union.go new file mode 100644 index 00000000..e34121d4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Union.go @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// A union is a complex type with children in Field +/// By default ids in the type vector refer to the offsets in the children +/// optionally typeIds provides an indirection between the child offset and the type id +/// for each child `typeIds[offset]` is the id used in the type vector +type Union struct { + _tab flatbuffers.Table +} + +func GetRootAsUnion(buf []byte, offset flatbuffers.UOffsetT) *Union { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Union{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Union) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Union) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Union) Mode() UnionMode { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return UnionMode(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Union) MutateMode(n UnionMode) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +func (rcv *Union) TypeIds(j int) int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4)) + } + return 0 +} + +func (rcv *Union) TypeIdsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +func (rcv *Union) MutateTypeIds(j int, n int32) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n) + } + return false +} + +func UnionStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func UnionAddMode(builder *flatbuffers.Builder, mode UnionMode) { + builder.PrependInt16Slot(0, int16(mode), 0) +} +func UnionAddTypeIds(builder *flatbuffers.Builder, typeIds flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(typeIds), 0) +} +func UnionStartTypeIdsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(4, numElems, 4) +} +func UnionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/UnionMode.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/UnionMode.go new file mode 100644 index 00000000..357c1f3c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/UnionMode.go @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import "strconv" + +type UnionMode int16 + +const ( + UnionModeSparse UnionMode = 0 + UnionModeDense UnionMode = 1 +) + +var EnumNamesUnionMode = map[UnionMode]string{ + UnionModeSparse: "Sparse", + UnionModeDense: "Dense", +} + +var EnumValuesUnionMode = map[string]UnionMode{ + "Sparse": UnionModeSparse, + "Dense": UnionModeDense, +} + +func (v UnionMode) String() string { + if s, ok := EnumNamesUnionMode[v]; ok { + return s + } + return "UnionMode(" + strconv.FormatInt(int64(v), 10) + ")" +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Utf8.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Utf8.go new file mode 100644 index 00000000..4ff365a3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Utf8.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Unicode with UTF-8 encoding +type Utf8 struct { + _tab flatbuffers.Table +} + +func GetRootAsUtf8(buf []byte, offset flatbuffers.UOffsetT) *Utf8 { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Utf8{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Utf8) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Utf8) Table() flatbuffers.Table { + return rcv._tab +} + +func Utf8Start(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func Utf8End(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go b/vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go new file mode 100644 index 00000000..c19d2d27 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/apache/arrow/go/v12/arrow/internal/flatbuf"
+)
+
+const CurMetadataVersion = flatbuf.MetadataVersionV5
+
+// DefaultHasValidityBitmap is a convenience function equivalent to
+// calling HasValidityBitmap with CurMetadataVersion.
+func DefaultHasValidityBitmap(id arrow.Type) bool { return HasValidityBitmap(id, CurMetadataVersion) }
+
+// HasValidityBitmap returns whether the given type at the provided version is
+// expected to have a validity bitmap in its representation.
+//
+// Typically this is necessary because of the change between V4 and V5
+// where union types no longer have validity bitmaps.
+func HasValidityBitmap(id arrow.Type, version flatbuf.MetadataVersion) bool {
+	// in <=V4 Null types had no validity bitmap
+	// in >=V5 Null and Union types have no validity bitmap
+	if version < flatbuf.MetadataVersionV5 {
+		return id != arrow.NULL
+	}
+
+	switch id {
+	case arrow.NULL, arrow.DENSE_UNION, arrow.SPARSE_UNION, arrow.RUN_END_ENCODED:
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go
new file mode 100644
index 00000000..7ba76067
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
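+
+// A minimal sketch of how the validity-bitmap helper defined in utils.go
+// above might be consulted (field is a hypothetical arrow.Field; buffer 0 of
+// the matching ArrayData is the validity bitmap when the helper returns true):
+//
+//	if internal.DefaultHasValidityBitmap(field.Type.ID()) {
+//		// inspect the validity bitmap buffer
+//	}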
+ +package ipc + +import ( + "io" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +type compressor interface { + MaxCompressedLen(n int) int + Reset(io.Writer) + io.WriteCloser + Type() flatbuf.CompressionType +} + +type lz4Compressor struct { + *lz4.Writer +} + +func (lz4Compressor) MaxCompressedLen(n int) int { + return lz4.CompressBlockBound(n) +} + +func (lz4Compressor) Type() flatbuf.CompressionType { + return flatbuf.CompressionTypeLZ4_FRAME +} + +type zstdCompressor struct { + *zstd.Encoder +} + +// from zstd.h, ZSTD_COMPRESSBOUND +func (zstdCompressor) MaxCompressedLen(len int) int { + debug.Assert(len >= 0, "MaxCompressedLen called with len less than 0") + extra := uint((uint(128<<10) - uint(len)) >> 11) + if len >= (128 << 10) { + extra = 0 + } + return int(uint(len+(len>>8)) + extra) +} + +func (zstdCompressor) Type() flatbuf.CompressionType { + return flatbuf.CompressionTypeZSTD +} + +func getCompressor(codec flatbuf.CompressionType) compressor { + switch codec { + case flatbuf.CompressionTypeLZ4_FRAME: + w := lz4.NewWriter(nil) + // options here chosen in order to match the C++ implementation + w.Apply(lz4.ChecksumOption(false), lz4.BlockSizeOption(lz4.Block64Kb)) + return &lz4Compressor{w} + case flatbuf.CompressionTypeZSTD: + enc, err := zstd.NewWriter(nil) + if err != nil { + panic(err) + } + return zstdCompressor{enc} + } + return nil +} + +type decompressor interface { + io.Reader + Reset(io.Reader) + Close() +} + +type zstdDecompressor struct { + *zstd.Decoder +} + +func (z *zstdDecompressor) Reset(r io.Reader) { + if err := z.Decoder.Reset(r); err != nil { + panic(err) + } +} + +func (z *zstdDecompressor) Close() { + z.Decoder.Close() +} + +type lz4Decompressor struct { + *lz4.Reader +} + +func (z *lz4Decompressor) Close() {} + +func getDecompressor(codec flatbuf.CompressionType) decompressor { + switch codec { + case flatbuf.CompressionTypeLZ4_FRAME: + return &lz4Decompressor{lz4.NewReader(nil)} + case flatbuf.CompressionTypeZSTD: + dec, err := zstd.NewReader(nil) + if err != nil { + panic(err) + } + return &zstdDecompressor{dec} + } + return nil +} + +type bufferWriter struct { + buf *memory.Buffer + pos int +} + +func (bw *bufferWriter) Write(p []byte) (n int, err error) { + if bw.pos+len(p) >= bw.buf.Cap() { + bw.buf.Reserve(bw.pos + len(p)) + } + n = copy(bw.buf.Buf()[bw.pos:], p) + bw.pos += n + return +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go new file mode 100644 index 00000000..c20d727e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go @@ -0,0 +1,162 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "errors" + "math/bits" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// swap the endianness of the array's buffers as needed in-place to save +// the cost of reallocation. +// +// assumes that nested data buffers are never re-used, if an *array.Data +// child is re-used among the children or the dictionary then this might +// end up double-swapping (putting it back into the original endianness). +// if it is needed to support re-using the buffers, then this can be +// re-factored to instead return a NEW array.Data object with newly +// allocated buffers, rather than doing it in place. +// +// For now this is intended to be used by the IPC readers after loading +// arrays from an IPC message which currently is guaranteed to not re-use +// buffers between arrays. +func swapEndianArrayData(data *array.Data) error { + if data.Offset() != 0 { + return errors.New("unsupported data format: data.offset != 0") + } + if err := swapType(data.DataType(), data); err != nil { + return err + } + return swapChildren(data.Children()) +} + +func swapChildren(children []arrow.ArrayData) (err error) { + for i := range children { + if err = swapEndianArrayData(children[i].(*array.Data)); err != nil { + break + } + } + return +} + +func swapType(dt arrow.DataType, data *array.Data) (err error) { + switch dt.ID() { + case arrow.BINARY, arrow.STRING: + swapOffsets(1, 32, data) + return + case arrow.LARGE_BINARY, arrow.LARGE_STRING: + swapOffsets(1, 64, data) + return + case arrow.NULL, arrow.BOOL, arrow.INT8, arrow.UINT8, + arrow.FIXED_SIZE_BINARY, arrow.FIXED_SIZE_LIST, arrow.STRUCT: + return + } + + switch dt := dt.(type) { + case *arrow.Decimal128Type: + rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + length := data.Buffers()[1].Len() / arrow.Decimal128SizeBytes + for i := 0; i < length; i++ { + idx := i * 2 + tmp := bits.ReverseBytes64(rawdata[idx]) + rawdata[idx] = bits.ReverseBytes64(rawdata[idx+1]) + rawdata[idx+1] = tmp + } + case *arrow.Decimal256Type: + rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + length := data.Buffers()[1].Len() / arrow.Decimal256SizeBytes + for i := 0; i < length; i++ { + idx := i * 4 + tmp0 := bits.ReverseBytes64(rawdata[idx]) + tmp1 := bits.ReverseBytes64(rawdata[idx+1]) + tmp2 := bits.ReverseBytes64(rawdata[idx+2]) + rawdata[idx] = bits.ReverseBytes64(rawdata[idx+3]) + rawdata[idx+1] = tmp2 + rawdata[idx+2] = tmp1 + rawdata[idx+3] = tmp0 + } + case arrow.UnionType: + if dt.Mode() == arrow.DenseMode { + swapOffsets(2, 32, data) + } + case *arrow.ListType: + swapOffsets(1, 32, data) + case *arrow.LargeListType: + swapOffsets(1, 64, data) + case *arrow.MapType: + swapOffsets(1, 32, data) + case *arrow.DayTimeIntervalType: + byteSwapBuffer(32, data.Buffers()[1]) + case *arrow.MonthDayNanoIntervalType: + rawdata := arrow.MonthDayNanoIntervalTraits.CastFromBytes(data.Buffers()[1].Bytes()) + for i, tmp := range rawdata { + rawdata[i].Days = int32(bits.ReverseBytes32(uint32(tmp.Days))) + rawdata[i].Months = int32(bits.ReverseBytes32(uint32(tmp.Months))) + rawdata[i].Nanoseconds = int64(bits.ReverseBytes64(uint64(tmp.Nanoseconds))) + } + case arrow.ExtensionType: + return swapType(dt.StorageType(), data) + case *arrow.DictionaryType: + // dictionary itself was already swapped in ReadDictionary calls + return 
swapType(dt.IndexType, data) + case arrow.FixedWidthDataType: + byteSwapBuffer(dt.BitWidth(), data.Buffers()[1]) + } + return +} + +// this can get called on an invalid Array Data object by the IPC reader, +// so we won't rely on the data.length and will instead rely on the buffer's +// own size instead. +func byteSwapBuffer(bw int, buf *memory.Buffer) { + if bw == 1 || buf == nil { + // if byte width == 1, no need to swap anything + return + } + + switch bw { + case 16: + data := arrow.Uint16Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes16(data[i]) + } + case 32: + data := arrow.Uint32Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes32(data[i]) + } + case 64: + data := arrow.Uint64Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes64(data[i]) + } + } +} + +func swapOffsets(index int, bitWidth int, data *array.Data) { + if data.Buffers()[index] == nil || data.Buffers()[index].Len() == 0 { + return + } + + // other than unions, offset has one more element than the data.length + // don't yet implement large types, so hardcode 32bit offsets for now + byteSwapBuffer(bitWidth, data.Buffers()[index]) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go new file mode 100644 index 00000000..f008bd49 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go @@ -0,0 +1,739 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/internal" + "github.com/apache/arrow/go/v12/arrow/internal/dictutils" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// FileReader is an Arrow file reader. +type FileReader struct { + r ReadAtSeeker + + footer struct { + offset int64 + buffer *memory.Buffer + data *flatbuf.Footer + } + + // fields dictTypeMap + memo dictutils.Memo + + schema *arrow.Schema + record arrow.Record + + irec int // current record index. used for the arrio.Reader interface + err error // last error + + mem memory.Allocator + swapEndianness bool +} + +// NewFileReader opens an Arrow file using the provided reader r. +func NewFileReader(r ReadAtSeeker, opts ...Option) (*FileReader, error) { + var ( + cfg = newConfig(opts...) 
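+		// err is shared by the sequential setup steps below
+		// (footer offset lookup, footer decode, schema decode).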
+		err error
+
+		f = FileReader{
+			r:    r,
+			memo: dictutils.NewMemo(),
+			mem:  cfg.alloc,
+		}
+	)
+
+	if cfg.footer.offset <= 0 {
+		cfg.footer.offset, err = f.r.Seek(0, io.SeekEnd)
+		if err != nil {
+			return nil, fmt.Errorf("arrow/ipc: could not retrieve footer offset: %w", err)
+		}
+	}
+	f.footer.offset = cfg.footer.offset
+
+	err = f.readFooter()
+	if err != nil {
+		return nil, fmt.Errorf("arrow/ipc: could not decode footer: %w", err)
+	}
+
+	err = f.readSchema(cfg.ensureNativeEndian)
+	if err != nil {
+		return nil, fmt.Errorf("arrow/ipc: could not decode schema: %w", err)
+	}
+
+	if cfg.schema != nil && !cfg.schema.Equal(f.schema) {
+		return nil, fmt.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema)
+	}
+
+	return &f, err
+}
+
+func (f *FileReader) readFooter() error {
+	var err error
+
+	if f.footer.offset <= int64(len(Magic)*2+4) {
+		return fmt.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset)
+	}
+
+	eof := int64(len(Magic) + 4)
+	buf := make([]byte, eof)
+	n, err := f.r.ReadAt(buf, f.footer.offset-eof)
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not read footer: %w", err)
+	}
+	if n != len(buf) {
+		return fmt.Errorf("arrow/ipc: could not read %d bytes from end of file", len(buf))
+	}
+
+	if !bytes.Equal(buf[4:], Magic) {
+		return errNotArrowFile
+	}
+
+	size := int64(binary.LittleEndian.Uint32(buf[:4]))
+	if size <= 0 || size+int64(len(Magic)*2+4) > f.footer.offset {
+		return errInconsistentFileMetadata
+	}
+
+	buf = make([]byte, size)
+	n, err = f.r.ReadAt(buf, f.footer.offset-size-eof)
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not read footer data: %w", err)
+	}
+	if n != len(buf) {
+		return fmt.Errorf("arrow/ipc: could not read %d bytes from footer data", len(buf))
+	}
+
+	f.footer.buffer = memory.NewBufferBytes(buf)
+	f.footer.data = flatbuf.GetRootAsFooter(buf, 0)
+	return err
+}
+
+func (f *FileReader) readSchema(ensureNativeEndian bool) error {
+	var (
+		err  error
+		kind dictutils.Kind
+	)
+
+	schema := f.footer.data.Schema(nil)
+	if schema == nil {
+		return fmt.Errorf("arrow/ipc: could not load schema from flatbuffer data")
+	}
+	f.schema, err = schemaFromFB(schema, &f.memo)
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not read schema: %w", err)
+	}
+
+	if ensureNativeEndian && !f.schema.IsNativeEndian() {
+		f.swapEndianness = true
+		f.schema = f.schema.WithEndianness(endian.NativeEndian)
+	}
+
+	for i := 0; i < f.NumDictionaries(); i++ {
+		blk, err := f.dict(i)
+		if err != nil {
+			return fmt.Errorf("arrow/ipc: could not read dictionary[%d]: %w", i, err)
+		}
+		switch {
+		case !bitutil.IsMultipleOf8(blk.Offset):
+			return fmt.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset, i)
+		case !bitutil.IsMultipleOf8(int64(blk.Meta)):
+			return fmt.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta, i)
+		case !bitutil.IsMultipleOf8(blk.Body):
+			return fmt.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body, i)
+		}
+
+		msg, err := blk.NewMessage()
+		if err != nil {
+			return err
+		}
+
+		kind, err = readDictionary(&f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem)
+		if err != nil {
+			return err
+		}
+		if kind == dictutils.KindReplacement {
+			return errors.New("arrow/ipc: unsupported dictionary replacement in IPC file")
+		}
+	}
+
+	return err
+}
+
+func (f *FileReader) block(i int) (fileBlock, error) {
+	var blk flatbuf.Block
+	if !f.footer.data.RecordBatches(&blk, i) {
+		return fileBlock{},
fmt.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return fileBlock{ + Offset: blk.Offset(), + Meta: blk.MetaDataLength(), + Body: blk.BodyLength(), + r: f.r, + mem: f.mem, + }, nil +} + +func (f *FileReader) dict(i int) (fileBlock, error) { + var blk flatbuf.Block + if !f.footer.data.Dictionaries(&blk, i) { + return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } + + return fileBlock{ + Offset: blk.Offset(), + Meta: blk.MetaDataLength(), + Body: blk.BodyLength(), + r: f.r, + mem: f.mem, + }, nil +} + +func (f *FileReader) Schema() *arrow.Schema { + return f.schema +} + +func (f *FileReader) NumDictionaries() int { + if f.footer.data == nil { + return 0 + } + return f.footer.data.DictionariesLength() +} + +func (f *FileReader) NumRecords() int { + return f.footer.data.RecordBatchesLength() +} + +func (f *FileReader) Version() MetadataVersion { + return MetadataVersion(f.footer.data.Version()) +} + +// Close cleans up resources used by the File. +// Close does not close the underlying reader. +func (f *FileReader) Close() error { + if f.footer.data != nil { + f.footer.data = nil + } + + if f.footer.buffer != nil { + f.footer.buffer.Release() + f.footer.buffer = nil + } + + if f.record != nil { + f.record.Release() + f.record = nil + } + return nil +} + +// Record returns the i-th record from the file. +// The returned value is valid until the next call to Record. +// Users need to call Retain on that Record to keep it valid for longer. +func (f *FileReader) Record(i int) (arrow.Record, error) { + record, err := f.RecordAt(i) + if err != nil { + return nil, err + } + + if f.record != nil { + f.record.Release() + } + + f.record = record + return record, nil +} + +// Record returns the i-th record from the file. Ownership is transferred to the +// caller and must call Release() to free the memory. This method is safe to +// call concurrently. +func (f *FileReader) RecordAt(i int) (arrow.Record, error) { + if i < 0 || i > f.NumRecords() { + panic("arrow/ipc: record index out of bounds") + } + + blk, err := f.block(i) + if err != nil { + return nil, err + } + switch { + case !bitutil.IsMultipleOf8(blk.Offset): + return nil, fmt.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset, i) + case !bitutil.IsMultipleOf8(int64(blk.Meta)): + return nil, fmt.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta, i) + case !bitutil.IsMultipleOf8(blk.Body): + return nil, fmt.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body, i) + } + + msg, err := blk.NewMessage() + if err != nil { + return nil, err + } + defer msg.Release() + + if msg.Type() != MessageRecordBatch { + return nil, fmt.Errorf("arrow/ipc: message %d is not a Record", i) + } + + return newRecord(f.schema, &f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem), nil +} + +// Read reads the current record from the underlying stream and an error, if any. +// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). +// +// The returned record value is valid until the next call to Read. +// Users need to call Retain on that Record to keep it valid for longer. +func (f *FileReader) Read() (rec arrow.Record, err error) { + if f.irec == f.NumRecords() { + return nil, io.EOF + } + rec, f.err = f.Record(f.irec) + f.irec++ + return rec, f.err +} + +// ReadAt reads the i-th record from the underlying stream and an error, if any. 
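+// ReadAt satisfies the arrio.ReaderAt interface.
+//
+// A minimal sketch of random access through a file reader; the file name is
+// illustrative only and error handling is elided:
+//
+//	f, _ := os.Open("records.arrow")
+//	r, _ := ipc.NewFileReader(f)
+//	defer r.Close()
+//	rec, _ := r.ReadAt(0) // equivalent to r.Record(0)
+//	// rec stays valid until the next Record/Read call;
+//	// Retain it to keep it alive longer.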
+func (f *FileReader) ReadAt(i int64) (arrow.Record, error) { + return f.Record(int(i)) +} + +func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) arrow.Record { + var ( + msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) + md flatbuf.RecordBatch + codec decompressor + ) + initFB(&md, msg.Header) + rows := md.Length() + + bodyCompress := md.Compression(nil) + if bodyCompress != nil { + codec = getDecompressor(bodyCompress.Codec()) + defer codec.Close() + } + + ctx := &arrayLoaderContext{ + src: ipcSource{ + meta: &md, + r: body, + codec: codec, + mem: mem, + }, + memo: memo, + max: kMaxNestingDepth, + version: MetadataVersion(msg.Version()), + } + + pos := dictutils.NewFieldPos() + cols := make([]arrow.Array, len(schema.Fields())) + for i, field := range schema.Fields() { + data := ctx.loadArray(field.Type) + defer data.Release() + + if err := dictutils.ResolveFieldDict(memo, data, pos.Child(int32(i)), mem); err != nil { + panic(err) + } + + if swapEndianness { + swapEndianArrayData(data.(*array.Data)) + } + + cols[i] = array.MakeFromData(data) + defer cols[i].Release() + } + + return array.NewRecord(schema, cols, rows) +} + +type ipcSource struct { + meta *flatbuf.RecordBatch + r ReadAtSeeker + codec decompressor + mem memory.Allocator +} + +func (src *ipcSource) buffer(i int) *memory.Buffer { + var buf flatbuf.Buffer + if !src.meta.Buffers(&buf, i) { + panic("arrow/ipc: buffer index out of bound") + } + + if buf.Length() == 0 { + return memory.NewBufferBytes(nil) + } + + raw := memory.NewResizableBuffer(src.mem) + if src.codec == nil { + raw.Resize(int(buf.Length())) + _, err := src.r.ReadAt(raw.Bytes(), buf.Offset()) + if err != nil { + panic(err) + } + } else { + sr := io.NewSectionReader(src.r, buf.Offset(), buf.Length()) + var uncompressedSize uint64 + + err := binary.Read(sr, binary.LittleEndian, &uncompressedSize) + if err != nil { + panic(err) + } + + var r io.Reader = sr + // check for an uncompressed buffer + if int64(uncompressedSize) != -1 { + raw.Resize(int(uncompressedSize)) + src.codec.Reset(sr) + r = src.codec + } else { + raw.Resize(int(buf.Length() - 8)) + } + + if _, err = io.ReadFull(r, raw.Bytes()); err != nil { + panic(err) + } + } + + return raw +} + +func (src *ipcSource) fieldMetadata(i int) *flatbuf.FieldNode { + var node flatbuf.FieldNode + if !src.meta.Nodes(&node, i) { + panic("arrow/ipc: field metadata out of bound") + } + return &node +} + +type arrayLoaderContext struct { + src ipcSource + ifield int + ibuffer int + max int + memo *dictutils.Memo + version MetadataVersion +} + +func (ctx *arrayLoaderContext) field() *flatbuf.FieldNode { + field := ctx.src.fieldMetadata(ctx.ifield) + ctx.ifield++ + return field +} + +func (ctx *arrayLoaderContext) buffer() *memory.Buffer { + buf := ctx.src.buffer(ctx.ibuffer) + ctx.ibuffer++ + return buf +} + +func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { + switch dt := dt.(type) { + case *arrow.NullType: + return ctx.loadNull() + + case *arrow.DictionaryType: + indices := ctx.loadPrimitive(dt.IndexType) + defer indices.Release() + return array.NewData(dt, indices.Len(), indices.Buffers(), indices.Children(), indices.NullN(), indices.Offset()) + + case *arrow.BooleanType, + *arrow.Int8Type, *arrow.Int16Type, *arrow.Int32Type, *arrow.Int64Type, + *arrow.Uint8Type, *arrow.Uint16Type, *arrow.Uint32Type, *arrow.Uint64Type, + *arrow.Float16Type, *arrow.Float32Type, *arrow.Float64Type, + *arrow.Decimal128Type, 
*arrow.Decimal256Type, + *arrow.Time32Type, *arrow.Time64Type, + *arrow.TimestampType, + *arrow.Date32Type, *arrow.Date64Type, + *arrow.MonthIntervalType, *arrow.DayTimeIntervalType, *arrow.MonthDayNanoIntervalType, + *arrow.DurationType: + return ctx.loadPrimitive(dt) + + case *arrow.BinaryType, *arrow.StringType, *arrow.LargeStringType, *arrow.LargeBinaryType: + return ctx.loadBinary(dt) + + case *arrow.FixedSizeBinaryType: + return ctx.loadFixedSizeBinary(dt) + + case *arrow.ListType: + return ctx.loadList(dt) + + case *arrow.LargeListType: + return ctx.loadList(dt) + + case *arrow.FixedSizeListType: + return ctx.loadFixedSizeList(dt) + + case *arrow.StructType: + return ctx.loadStruct(dt) + + case *arrow.MapType: + return ctx.loadMap(dt) + + case arrow.ExtensionType: + storage := ctx.loadArray(dt.StorageType()) + defer storage.Release() + return array.NewData(dt, storage.Len(), storage.Buffers(), storage.Children(), storage.NullN(), storage.Offset()) + + case *arrow.RunEndEncodedType: + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer releaseBuffers(buffers) + + runEnds := ctx.loadChild(dt.RunEnds()) + defer runEnds.Release() + values := ctx.loadChild(dt.Encoded()) + defer values.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{runEnds, values}, int(field.NullCount()), 0) + + case arrow.UnionType: + return ctx.loadUnion(dt) + + default: + panic(fmt.Errorf("arrow/ipc: array type %T not handled yet", dt)) + } +} + +func (ctx *arrayLoaderContext) loadCommon(typ arrow.Type, nbufs int) (*flatbuf.FieldNode, []*memory.Buffer) { + buffers := make([]*memory.Buffer, 0, nbufs) + field := ctx.field() + + var buf *memory.Buffer + + if internal.HasValidityBitmap(typ, flatbuf.MetadataVersion(ctx.version)) { + switch field.NullCount() { + case 0: + ctx.ibuffer++ + default: + buf = ctx.buffer() + } + } + buffers = append(buffers, buf) + + return field, buffers +} + +func (ctx *arrayLoaderContext) loadChild(dt arrow.DataType) arrow.ArrayData { + if ctx.max == 0 { + panic("arrow/ipc: nested type limit reached") + } + ctx.max-- + sub := ctx.loadArray(dt) + ctx.max++ + return sub +} + +func (ctx *arrayLoaderContext) loadNull() arrow.ArrayData { + field := ctx.field() + return array.NewData(arrow.Null, int(field.Length()), nil, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + + switch field.Length() { + case 0: + buffers = append(buffers, nil) + ctx.ibuffer++ + default: + buffers = append(buffers, ctx.buffer()) + } + + defer releaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 3) + buffers = append(buffers, ctx.buffer(), ctx.buffer()) + defer releaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer releaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer 
releaseBuffers(buffers) + + sub := ctx.loadChild(dt.ValueType()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +type listLike interface { + arrow.DataType + Elem() arrow.DataType +} + +func (ctx *arrayLoaderContext) loadList(dt listLike) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer releaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer releaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer releaseBuffers(buffers) + + subs := make([]arrow.ArrayData, len(dt.Fields())) + for i, f := range dt.Fields() { + subs[i] = ctx.loadChild(f.Type) + } + defer func() { + for i := range subs { + subs[i].Release() + } + }() + + return array.NewData(dt, int(field.Length()), buffers, subs, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData { + // Sparse unions have 2 buffers (a nil validity bitmap, and the type ids) + nBuffers := 2 + // Dense unions have a third buffer, the offsets + if dt.Mode() == arrow.DenseMode { + nBuffers = 3 + } + + field, buffers := ctx.loadCommon(dt.ID(), nBuffers) + if field.NullCount() != 0 && buffers[0] != nil { + panic("arrow/ipc: cannot read pre-1.0.0 union array with top-level validity bitmap") + } + + switch field.Length() { + case 0: + buffers = append(buffers, memory.NewBufferBytes([]byte{})) + ctx.ibuffer++ + if dt.Mode() == arrow.DenseMode { + buffers = append(buffers, nil) + ctx.ibuffer++ + } + default: + buffers = append(buffers, ctx.buffer()) + if dt.Mode() == arrow.DenseMode { + buffers = append(buffers, ctx.buffer()) + } + } + + defer releaseBuffers(buffers) + subs := make([]arrow.ArrayData, len(dt.Fields())) + for i, f := range dt.Fields() { + subs[i] = ctx.loadChild(f.Type) + } + defer func() { + for i := range subs { + subs[i].Release() + } + }() + return array.NewData(dt, int(field.Length()), buffers, subs, 0, 0) +} + +func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) (dictutils.Kind, error) { + var ( + msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) + md flatbuf.DictionaryBatch + data flatbuf.RecordBatch + codec decompressor + ) + initFB(&md, msg.Header) + + md.Data(&data) + bodyCompress := data.Compression(nil) + if bodyCompress != nil { + codec = getDecompressor(bodyCompress.Codec()) + } + + id := md.Id() + // look up the dictionary value type, which must have been added to the + // memo already before calling this function + valueType, ok := memo.Type(id) + if !ok { + return 0, fmt.Errorf("arrow/ipc: no dictionary type found with id: %d", id) + } + + ctx := &arrayLoaderContext{ + src: ipcSource{ + meta: &data, + codec: codec, + r: body, + mem: mem, + }, + memo: memo, + max: kMaxNestingDepth, + } + + dict := ctx.loadArray(valueType) + defer dict.Release() + + if swapEndianness { + 
swapEndianArrayData(dict.(*array.Data)) + } + + if md.IsDelta() { + memo.AddDelta(id, dict) + return dictutils.KindDelta, nil + } + if memo.AddOrReplace(id, dict) { + return dictutils.KindNew, nil + } + return dictutils.KindReplacement, nil +} + +func releaseBuffers(buffers []*memory.Buffer) { + for _, b := range buffers { + if b != nil { + b.Release() + } + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go new file mode 100644 index 00000000..20c82b96 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go @@ -0,0 +1,394 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal/dictutils" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// PayloadWriter is an interface for injecting a different payloadwriter +// allowing more reusability with the Writer object with other scenarios, +// such as with Flight data +type PayloadWriter interface { + Start() error + WritePayload(Payload) error + Close() error +} + +type pwriter struct { + w io.WriteSeeker + pos int64 + + schema *arrow.Schema + dicts []fileBlock + recs []fileBlock +} + +func (w *pwriter) Start() error { + var err error + + err = w.updatePos() + if err != nil { + return fmt.Errorf("arrow/ipc: could not update position while in start: %w", err) + } + + // only necessary to align to 8-byte boundary at the start of the file + _, err = w.Write(Magic) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write magic Arrow bytes: %w", err) + } + + err = w.align(kArrowIPCAlignment) + if err != nil { + return fmt.Errorf("arrow/ipc: could not align start block: %w", err) + } + + return err +} + +func (w *pwriter) WritePayload(p Payload) error { + blk := fileBlock{Offset: w.pos, Meta: 0, Body: p.size} + n, err := writeIPCPayload(w, p) + if err != nil { + return err + } + + blk.Meta = int32(n) + + err = w.updatePos() + if err != nil { + return fmt.Errorf("arrow/ipc: could not update position while in write-payload: %w", err) + } + + switch flatbuf.MessageHeader(p.msg) { + case flatbuf.MessageHeaderDictionaryBatch: + w.dicts = append(w.dicts, blk) + case flatbuf.MessageHeaderRecordBatch: + w.recs = append(w.recs, blk) + } + + return nil +} + +func (w *pwriter) Close() error { + var err error + + // write file footer + err = w.updatePos() + if err != nil { + return fmt.Errorf("arrow/ipc: could not update position while in close: %w", err) + } + + pos := w.pos + err = writeFileFooter(w.schema, w.dicts, w.recs, w) + if 
err != nil { + return fmt.Errorf("arrow/ipc: could not write file footer: %w", err) + } + + // write file footer length + err = w.updatePos() // not strictly needed as we passed w to writeFileFooter... + if err != nil { + return fmt.Errorf("arrow/ipc: could not compute file footer length: %w", err) + } + + size := w.pos - pos + if size <= 0 { + return fmt.Errorf("arrow/ipc: invalid file footer size (size=%d)", size) + } + + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, uint32(size)) + _, err = w.Write(buf) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write file footer size: %w", err) + } + + _, err = w.Write(Magic) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write Arrow magic bytes: %w", err) + } + + return nil +} + +func (w *pwriter) updatePos() error { + var err error + w.pos, err = w.w.Seek(0, io.SeekCurrent) + return err +} + +func (w *pwriter) align(align int32) error { + remainder := paddedLength(w.pos, align) - w.pos + if remainder == 0 { + return nil + } + + _, err := w.Write(paddingBytes[:int(remainder)]) + return err +} + +func (w *pwriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.pos += int64(n) + return n, err +} + +func writeIPCPayload(w io.Writer, p Payload) (int, error) { + n, err := writeMessage(p.meta, kArrowIPCAlignment, w) + if err != nil { + return n, err + } + + // now write the buffers + for _, buf := range p.body { + var ( + size int64 + padding int64 + ) + + // the buffer might be null if we are handling zero row lengths. + if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + + if size > 0 { + _, err = w.Write(buf.Bytes()) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write payload message body: %w", err) + } + } + + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write payload message padding: %w", err) + } + } + } + + return n, err +} + +// Payload is the underlying message object which is passed to the payload writer +// for actually writing out ipc messages +type Payload struct { + msg MessageType + meta *memory.Buffer + body []*memory.Buffer + size int64 // length of body +} + +// Meta returns the buffer containing the metadata for this payload, +// callers must call Release on the buffer +func (p *Payload) Meta() *memory.Buffer { + if p.meta != nil { + p.meta.Retain() + } + return p.meta +} + +// SerializeBody serializes the body buffers and writes them to the provided +// writer. +func (p *Payload) SerializeBody(w io.Writer) error { + for _, data := range p.body { + if data == nil { + continue + } + + size := int64(data.Len()) + padding := bitutil.CeilByte64(size) - size + if size > 0 { + if _, err := w.Write(data.Bytes()); err != nil { + return fmt.Errorf("arrow/ipc: could not write payload message body: %w", err) + } + + if padding > 0 { + if _, err := w.Write(paddingBytes[:padding]); err != nil { + return fmt.Errorf("arrow/ipc: could not write payload message padding bytes: %w", err) + } + } + } + } + return nil +} + +func (p *Payload) Release() { + if p.meta != nil { + p.meta.Release() + p.meta = nil + } + for i, b := range p.body { + if b == nil { + continue + } + b.Release() + p.body[i] = nil + } +} + +type payloads []Payload + +func (ps payloads) Release() { + for i := range ps { + ps[i].Release() + } +} + +// FileWriter is an Arrow file writer. 
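+//
+// A minimal usage sketch; w is assumed to be an io.WriteSeeker, schema an
+// *arrow.Schema and rec an arrow.Record, with error handling elided:
+//
+//	fw, _ := ipc.NewFileWriter(w, ipc.WithSchema(schema))
+//	_ = fw.Write(rec)
+//	_ = fw.Close() // writes the footer; the underlying w stays open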
+type FileWriter struct { + w io.WriteSeeker + + mem memory.Allocator + + header struct { + started bool + offset int64 + } + + footer struct { + written bool + } + + pw PayloadWriter + + schema *arrow.Schema + mapper dictutils.Mapper + codec flatbuf.CompressionType + compressNP int + minSpaceSavings *float64 + + // map of the last written dictionaries by id + // so we can avoid writing the same dictionary over and over + // also needed for correctness when writing IPC format which + // does not allow replacements or deltas. + lastWrittenDicts map[int64]arrow.Array +} + +// NewFileWriter opens an Arrow file using the provided writer w. +func NewFileWriter(w io.WriteSeeker, opts ...Option) (*FileWriter, error) { + var ( + cfg = newConfig(opts...) + err error + ) + + f := FileWriter{ + w: w, + pw: &pwriter{w: w, schema: cfg.schema, pos: -1}, + mem: cfg.alloc, + schema: cfg.schema, + codec: cfg.codec, + compressNP: cfg.compressNP, + minSpaceSavings: cfg.minSpaceSavings, + } + + pos, err := f.w.Seek(0, io.SeekCurrent) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not seek current position: %w", err) + } + f.header.offset = pos + + return &f, err +} + +func (f *FileWriter) Close() error { + err := f.checkStarted() + if err != nil { + return fmt.Errorf("arrow/ipc: could not write empty file: %w", err) + } + + if f.footer.written { + return nil + } + + err = f.pw.Close() + if err != nil { + return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + f.footer.written = true + + return nil +} + +func (f *FileWriter) Write(rec arrow.Record) error { + schema := rec.Schema() + if schema == nil || !schema.Equal(f.schema) { + return errInconsistentSchema + } + + if err := f.checkStarted(); err != nil { + return fmt.Errorf("arrow/ipc: could not write header: %w", err) + } + + const allow64b = true + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder(f.mem, 0, kMaxNestingDepth, allow64b, f.codec, f.compressNP, f.minSpaceSavings) + ) + defer data.Release() + + err := writeDictionaryPayloads(f.mem, rec, true, false, &f.mapper, f.lastWrittenDicts, f.pw, enc) + if err != nil { + return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err) + } + + enc.reset() + if err := enc.Encode(&data, rec); err != nil { + return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return f.pw.WritePayload(data) +} + +func (f *FileWriter) checkStarted() error { + if !f.header.started { + return f.start() + } + return nil +} + +func (f *FileWriter) start() error { + f.header.started = true + err := f.pw.Start() + if err != nil { + return err + } + + f.mapper.ImportSchema(f.schema) + f.lastWrittenDicts = make(map[int64]arrow.Array) + + // write out schema payloads + ps := payloadFromSchema(f.schema, f.mem, &f.mapper) + defer ps.Release() + + for _, data := range ps { + err = f.pw.WritePayload(data) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go new file mode 100644 index 00000000..e651a993 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go @@ -0,0 +1,199 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "io" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/arrio" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +const ( + errNotArrowFile = errString("arrow/ipc: not an Arrow file") + errInconsistentFileMetadata = errString("arrow/ipc: file is smaller than indicated metadata size") + errInconsistentSchema = errString("arrow/ipc: tried to write record batch with different schema") + errMaxRecursion = errString("arrow/ipc: max recursion depth reached") + errBigArray = errString("arrow/ipc: array larger than 2^31-1 in length") + + kArrowAlignment = 64 // buffers are padded to 64b boundaries (for SIMD) + kTensorAlignment = 64 // tensors are padded to 64b boundaries + kArrowIPCAlignment = 8 // align on 8b boundaries in IPC +) + +var ( + paddingBytes [kArrowAlignment]byte + kEOS = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 0} // end of stream message + kIPCContToken uint32 = 0xFFFFFFFF // 32b continuation indicator for FlatBuffers 8b alignment +) + +func paddedLength(nbytes int64, alignment int32) int64 { + align := int64(alignment) + return ((nbytes + align - 1) / align) * align +} + +type errString string + +func (s errString) Error() string { + return string(s) +} + +type ReadAtSeeker interface { + io.Reader + io.Seeker + io.ReaderAt +} + +type config struct { + alloc memory.Allocator + schema *arrow.Schema + footer struct { + offset int64 + } + codec flatbuf.CompressionType + compressNP int + ensureNativeEndian bool + noAutoSchema bool + emitDictDeltas bool + minSpaceSavings *float64 +} + +func newConfig(opts ...Option) *config { + cfg := &config{ + alloc: memory.NewGoAllocator(), + codec: -1, // uncompressed + ensureNativeEndian: true, + } + + for _, opt := range opts { + opt(cfg) + } + + return cfg +} + +// Option is a functional option to configure opening or creating Arrow files +// and streams. +type Option func(*config) + +// WithFooterOffset specifies the Arrow footer position in bytes. +func WithFooterOffset(offset int64) Option { + return func(cfg *config) { + cfg.footer.offset = offset + } +} + +// WithAllocator specifies the Arrow memory allocator used while building records. +func WithAllocator(mem memory.Allocator) Option { + return func(cfg *config) { + cfg.alloc = mem + } +} + +// WithSchema specifies the Arrow schema to be used for reading or writing. +func WithSchema(schema *arrow.Schema) Option { + return func(cfg *config) { + cfg.schema = schema + } +} + +// WithLZ4 tells the writer to use LZ4 Frame compression on the data +// buffers before writing. Requires >= Arrow 1.0.0 to read/decompress +func WithLZ4() Option { + return func(cfg *config) { + cfg.codec = flatbuf.CompressionTypeLZ4_FRAME + } +} + +// WithZstd tells the writer to use ZSTD compression on the data +// buffers before writing. 
Requires >= Arrow 1.0.0 to read/decompress
+func WithZstd() Option {
+	return func(cfg *config) {
+		cfg.codec = flatbuf.CompressionTypeZSTD
+	}
+}
+
+// WithCompressConcurrency specifies a number of goroutines to spin up for
+// concurrent compression of the body buffers when writing compressed IPC
+// records. If n <= 1 then compression will be done serially without
+// goroutine parallelization. Default is 0.
+func WithCompressConcurrency(n int) Option {
+	return func(cfg *config) {
+		cfg.compressNP = n
+	}
+}
+
+// WithEnsureNativeEndian specifies whether or not to automatically byte-swap
+// buffers with endian-sensitive data if the schema's endianness is not the
+// platform-native endianness. This includes all numeric types, temporal types,
+// decimal types, as well as the offset buffers of variable-sized binary and
+// list-like types.
+//
+// This is only relevant to ipc Reader objects, not to writers. This defaults
+// to true.
+func WithEnsureNativeEndian(v bool) Option {
+	return func(cfg *config) {
+		cfg.ensureNativeEndian = v
+	}
+}
+
+// WithDelayReadSchema alters the ipc.Reader behavior to delay attempting
+// to read the schema from the stream until the first call to Next instead
+// of immediately attempting to read a schema from the stream when created.
func WithDelayReadSchema(v bool) Option {
+	return func(cfg *config) {
+		cfg.noAutoSchema = v
+	}
+}
+
+// WithDictionaryDeltas specifies whether or not to emit dictionary deltas.
+func WithDictionaryDeltas(v bool) Option {
+	return func(cfg *config) {
+		cfg.emitDictDeltas = v
+	}
+}
+
+// WithMinSpaceSavings specifies a minimum percentage of space savings that
+// compression must achieve for it to be applied to a buffer.
+//
+// Space savings is calculated as (1.0 - compressedSize / uncompressedSize).
+//
+// For example, if minSpaceSavings = 0.1, a 100-byte body buffer won't
+// undergo compression if its expected compressed size exceeds 90 bytes.
+// If this option is unset, compression will be used indiscriminately. If
+// no codec was supplied, this option is ignored.
+//
+// Values outside of the range [0,1] are handled as errors.
+//
+// Note that enabling this option may result in unreadable data for Arrow
+// Go and C++ versions prior to 12.0.0.
+func WithMinSpaceSavings(savings float64) Option {
+	return func(cfg *config) {
+		cfg.minSpaceSavings = &savings
+	}
+}
+
+var (
+	_ arrio.Reader = (*Reader)(nil)
+	_ arrio.Writer = (*Writer)(nil)
+	_ arrio.Reader = (*FileReader)(nil)
+	_ arrio.Writer = (*FileWriter)(nil)
+
+	_ arrio.ReaderAt = (*FileReader)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go
new file mode 100644
index 00000000..adc231cd
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go
@@ -0,0 +1,242 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "encoding/binary" + "fmt" + "io" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// MetadataVersion represents the Arrow metadata version. +type MetadataVersion flatbuf.MetadataVersion + +const ( + MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow-0.1.0 + MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow-0.2.0 + MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow-0.3.0 to 0.7.1 + MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow-0.8.0 + MetadataV5 = MetadataVersion(flatbuf.MetadataVersionV5) // version for >= Arrow-1.0.0, backward compatible with v4 +) + +func (m MetadataVersion) String() string { + if v, ok := flatbuf.EnumNamesMetadataVersion[flatbuf.MetadataVersion(m)]; ok { + return v + } + return fmt.Sprintf("MetadataVersion(%d)", int16(m)) +} + +// MessageType represents the type of Message in an Arrow format. +type MessageType flatbuf.MessageHeader + +const ( + MessageNone = MessageType(flatbuf.MessageHeaderNONE) + MessageSchema = MessageType(flatbuf.MessageHeaderSchema) + MessageDictionaryBatch = MessageType(flatbuf.MessageHeaderDictionaryBatch) + MessageRecordBatch = MessageType(flatbuf.MessageHeaderRecordBatch) + MessageTensor = MessageType(flatbuf.MessageHeaderTensor) + MessageSparseTensor = MessageType(flatbuf.MessageHeaderSparseTensor) +) + +func (m MessageType) String() string { + if v, ok := flatbuf.EnumNamesMessageHeader[flatbuf.MessageHeader(m)]; ok { + return v + } + return fmt.Sprintf("MessageType(%d)", int(m)) +} + +// Message is an IPC message, including metadata and body. +type Message struct { + refCount int64 + msg *flatbuf.Message + meta *memory.Buffer + body *memory.Buffer +} + +// NewMessage creates a new message from the metadata and body buffers. +// NewMessage panics if any of these buffers is nil. +func NewMessage(meta, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + meta.Retain() + body.Retain() + return &Message{ + refCount: 1, + msg: flatbuf.GetRootAsMessage(meta.Bytes(), 0), + meta: meta, + body: body, + } +} + +func newMessageFromFB(meta *flatbuf.Message, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + body.Retain() + return &Message{ + refCount: 1, + msg: meta, + meta: memory.NewBufferBytes(meta.Table().Bytes), + body: body, + } +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (msg *Message) Retain() { + atomic.AddInt64(&msg.refCount, 1) +} + +// Release decreases the reference count by 1. +// Release may be called simultaneously from multiple goroutines. +// When the reference count goes to zero, the memory is freed. 
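+//
+// Retain and Release come in pairs; a sketch of handing a message to
+// another goroutine (illustrative only):
+//
+//	msg.Retain()
+//	go func() {
+//		defer msg.Release()
+//		// ... use msg ...
+//	}()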
+func (msg *Message) Release() { + debug.Assert(atomic.LoadInt64(&msg.refCount) > 0, "too many releases") + + if atomic.AddInt64(&msg.refCount, -1) == 0 { + msg.meta.Release() + msg.body.Release() + msg.msg = nil + msg.meta = nil + msg.body = nil + } +} + +func (msg *Message) Version() MetadataVersion { + return MetadataVersion(msg.msg.Version()) +} + +func (msg *Message) Type() MessageType { + return MessageType(msg.msg.HeaderType()) +} + +func (msg *Message) BodyLen() int64 { + return msg.msg.BodyLength() +} + +type MessageReader interface { + Message() (*Message, error) + Release() + Retain() +} + +// MessageReader reads messages from an io.Reader. +type messageReader struct { + r io.Reader + + refCount int64 + msg *Message + + mem memory.Allocator +} + +// NewMessageReader returns a reader that reads messages from an input stream. +func NewMessageReader(r io.Reader, opts ...Option) MessageReader { + cfg := newConfig() + for _, opt := range opts { + opt(cfg) + } + + return &messageReader{r: r, refCount: 1, mem: cfg.alloc} +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (r *messageReader) Retain() { + atomic.AddInt64(&r.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *messageReader) Release() { + debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases") + + if atomic.AddInt64(&r.refCount, -1) == 0 { + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + } +} + +// Message returns the current message that has been extracted from the +// underlying stream. +// It is valid until the next call to Message. +func (r *messageReader) Message() (*Message, error) { + var buf = make([]byte, 4) + _, err := io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read continuation indicator: %w", err) + } + var ( + cid = binary.LittleEndian.Uint32(buf) + msgLen int32 + ) + switch cid { + case 0: + // EOS message. + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? + case kIPCContToken: + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message length: %w", err) + } + msgLen = int32(binary.LittleEndian.Uint32(buf)) + if msgLen == 0 { + // optional 0 EOS control message + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? 
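+			// (A zero length after the continuation marker matches the
+			// explicit end-of-stream sentinel kEOS defined in ipc.go:
+			// 0xFFFFFFFF followed by a 4-byte zero length.)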
+ } + + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + msgLen = int32(cid) + } + + buf = make([]byte, msgLen) + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + meta := flatbuf.GetRootAsMessage(buf, 0) + bodyLen := meta.BodyLength() + + body := memory.NewResizableBuffer(r.mem) + defer body.Release() + body.Resize(int(bodyLen)) + + _, err = io.ReadFull(r.r, body.Bytes()) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err) + } + + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + r.msg = newMessageFromFB(meta, body) + + return r.msg, nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go new file mode 100644 index 00000000..980425e5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go @@ -0,0 +1,1261 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/internal/dictutils" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" + flatbuffers "github.com/google/flatbuffers/go" +) + +// Magic string identifying an Apache Arrow file. +var Magic = []byte("ARROW1") + +const ( + currentMetadataVersion = MetadataV5 + minMetadataVersion = MetadataV4 + + // constants for the extension type metadata keys for the type name and + // any extension metadata to be passed to deserialize. + ExtensionTypeKeyName = "ARROW:extension:name" + ExtensionMetadataKeyName = "ARROW:extension:metadata" + + // ARROW-109: We set this number arbitrarily to help catch user mistakes. 
For + // deeply nested schemas, it is expected the user will indicate explicitly the + // maximum allowed recursion depth + kMaxNestingDepth = 64 +) + +type startVecFunc func(b *flatbuffers.Builder, n int) flatbuffers.UOffsetT + +type fieldMetadata struct { + Len int64 + Nulls int64 + Offset int64 +} + +type bufferMetadata struct { + Offset int64 // relative offset into the memory page to the starting byte of the buffer + Len int64 // absolute length in bytes of the buffer +} + +type fileBlock struct { + Offset int64 + Meta int32 + Body int64 + + r io.ReaderAt + mem memory.Allocator +} + +func fileBlocksToFB(b *flatbuffers.Builder, blocks []fileBlock, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(blocks)) + for i := len(blocks) - 1; i >= 0; i-- { + blk := blocks[i] + flatbuf.CreateBlock(b, blk.Offset, blk.Meta, blk.Body) + } + + return b.EndVector(len(blocks)) +} + +func (blk fileBlock) NewMessage() (*Message, error) { + var ( + err error + buf []byte + body *memory.Buffer + meta *memory.Buffer + r = blk.section() + ) + + meta = memory.NewResizableBuffer(blk.mem) + meta.Resize(int(blk.Meta)) + defer meta.Release() + + buf = meta.Bytes() + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + prefix := 0 + switch binary.LittleEndian.Uint32(buf) { + case 0: + case kIPCContToken: + prefix = 8 + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + prefix = 4 + } + + // drop buf-size already known from blk.Meta + meta = memory.SliceBuffer(meta, prefix, int(blk.Meta)-prefix) + defer meta.Release() + + body = memory.NewResizableBuffer(blk.mem) + defer body.Release() + body.Resize(int(blk.Body)) + buf = body.Bytes() + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err) + } + + return NewMessage(meta, body), nil +} + +func (blk fileBlock) section() io.Reader { + return io.NewSectionReader(blk.r, blk.Offset, int64(blk.Meta)+blk.Body) +} + +func unitFromFB(unit flatbuf.TimeUnit) arrow.TimeUnit { + switch unit { + case flatbuf.TimeUnitSECOND: + return arrow.Second + case flatbuf.TimeUnitMILLISECOND: + return arrow.Millisecond + case flatbuf.TimeUnitMICROSECOND: + return arrow.Microsecond + case flatbuf.TimeUnitNANOSECOND: + return arrow.Nanosecond + default: + panic(fmt.Errorf("arrow/ipc: invalid flatbuf.TimeUnit(%d) value", unit)) + } +} + +func unitToFB(unit arrow.TimeUnit) flatbuf.TimeUnit { + switch unit { + case arrow.Second: + return flatbuf.TimeUnitSECOND + case arrow.Millisecond: + return flatbuf.TimeUnitMILLISECOND + case arrow.Microsecond: + return flatbuf.TimeUnitMICROSECOND + case arrow.Nanosecond: + return flatbuf.TimeUnitNANOSECOND + default: + panic(fmt.Errorf("arrow/ipc: invalid arrow.TimeUnit(%d) value", unit)) + } +} + +// initFB is a helper function to handle flatbuffers' polymorphism. 
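+//
+// For example, the reader uses it to initialize a concrete flatbuffer table
+// from a message header union:
+//
+//	var md flatbuf.RecordBatch
+//	initFB(&md, msg.Header)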
+func initFB(t interface { + Table() flatbuffers.Table + Init([]byte, flatbuffers.UOffsetT) +}, f func(tbl *flatbuffers.Table) bool) { + tbl := t.Table() + if !f(&tbl) { + panic(fmt.Errorf("arrow/ipc: could not initialize %T from flatbuffer", t)) + } + t.Init(tbl.Bytes, tbl.Pos) +} + +func fieldFromFB(field *flatbuf.Field, pos dictutils.FieldPos, memo *dictutils.Memo) (arrow.Field, error) { + var ( + err error + o arrow.Field + ) + + o.Name = string(field.Name()) + o.Nullable = field.Nullable() + o.Metadata, err = metadataFromFB(field) + if err != nil { + return o, err + } + + n := field.ChildrenLength() + children := make([]arrow.Field, n) + for i := range children { + var childFB flatbuf.Field + if !field.Children(&childFB, i) { + return o, fmt.Errorf("arrow/ipc: could not load field child %d", i) + + } + child, err := fieldFromFB(&childFB, pos.Child(int32(i)), memo) + if err != nil { + return o, fmt.Errorf("arrow/ipc: could not convert field child %d: %w", i, err) + } + children[i] = child + } + + o.Type, err = typeFromFB(field, pos, children, &o.Metadata, memo) + if err != nil { + return o, fmt.Errorf("arrow/ipc: could not convert field type: %w", err) + } + + return o, nil +} + +func fieldToFB(b *flatbuffers.Builder, pos dictutils.FieldPos, field arrow.Field, memo *dictutils.Mapper) flatbuffers.UOffsetT { + var visitor = fieldVisitor{b: b, memo: memo, pos: pos, meta: make(map[string]string)} + return visitor.result(field) +} + +type fieldVisitor struct { + b *flatbuffers.Builder + memo *dictutils.Mapper + pos dictutils.FieldPos + dtype flatbuf.Type + offset flatbuffers.UOffsetT + kids []flatbuffers.UOffsetT + meta map[string]string +} + +func (fv *fieldVisitor) visit(field arrow.Field) { + dt := field.Type + switch dt := dt.(type) { + case *arrow.NullType: + fv.dtype = flatbuf.TypeNull + flatbuf.NullStart(fv.b) + fv.offset = flatbuf.NullEnd(fv.b) + + case *arrow.BooleanType: + fv.dtype = flatbuf.TypeBool + flatbuf.BoolStart(fv.b) + fv.offset = flatbuf.BoolEnd(fv.b) + + case *arrow.Uint8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Int8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Float16Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float32Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float64Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Decimal128Type: + fv.dtype = flatbuf.TypeDecimal + flatbuf.DecimalStart(fv.b) + flatbuf.DecimalAddPrecision(fv.b, dt.Precision) + flatbuf.DecimalAddScale(fv.b, dt.Scale) + flatbuf.DecimalAddBitWidth(fv.b, 128) + fv.offset = flatbuf.DecimalEnd(fv.b) + + case 
*arrow.Decimal256Type: + fv.dtype = flatbuf.TypeDecimal + flatbuf.DecimalStart(fv.b) + flatbuf.DecimalAddPrecision(fv.b, dt.Precision) + flatbuf.DecimalAddScale(fv.b, dt.Scale) + flatbuf.DecimalAddBitWidth(fv.b, 256) + fv.offset = flatbuf.DecimalEnd(fv.b) + + case *arrow.FixedSizeBinaryType: + fv.dtype = flatbuf.TypeFixedSizeBinary + flatbuf.FixedSizeBinaryStart(fv.b) + flatbuf.FixedSizeBinaryAddByteWidth(fv.b, int32(dt.ByteWidth)) + fv.offset = flatbuf.FixedSizeBinaryEnd(fv.b) + + case *arrow.BinaryType: + fv.dtype = flatbuf.TypeBinary + flatbuf.BinaryStart(fv.b) + fv.offset = flatbuf.BinaryEnd(fv.b) + + case *arrow.LargeBinaryType: + fv.dtype = flatbuf.TypeLargeBinary + flatbuf.LargeBinaryStart(fv.b) + fv.offset = flatbuf.LargeBinaryEnd(fv.b) + + case *arrow.StringType: + fv.dtype = flatbuf.TypeUtf8 + flatbuf.Utf8Start(fv.b) + fv.offset = flatbuf.Utf8End(fv.b) + + case *arrow.LargeStringType: + fv.dtype = flatbuf.TypeLargeUtf8 + flatbuf.LargeUtf8Start(fv.b) + fv.offset = flatbuf.LargeUtf8End(fv.b) + + case *arrow.Date32Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitDAY) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Date64Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitMILLISECOND) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Time32Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 32) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.Time64Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 64) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.TimestampType: + fv.dtype = flatbuf.TypeTimestamp + unit := unitToFB(dt.Unit) + var tz flatbuffers.UOffsetT + if dt.TimeZone != "" { + tz = fv.b.CreateString(dt.TimeZone) + } + flatbuf.TimestampStart(fv.b) + flatbuf.TimestampAddUnit(fv.b, unit) + flatbuf.TimestampAddTimezone(fv.b, tz) + fv.offset = flatbuf.TimestampEnd(fv.b) + + case *arrow.StructType: + fv.dtype = flatbuf.TypeStruct_ + offsets := make([]flatbuffers.UOffsetT, len(dt.Fields())) + for i, field := range dt.Fields() { + offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) + } + flatbuf.Struct_Start(fv.b) + for i := len(offsets) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(offsets[i]) + } + fv.offset = flatbuf.Struct_End(fv.b) + fv.kids = append(fv.kids, offsets...) 
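+
+	// The list-like cases below (List, LargeList, FixedSizeList, Map) encode
+	// the element as a single child field, written before the parent type's
+	// flatbuffer table is started.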
+ + case *arrow.ListType: + fv.dtype = flatbuf.TypeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.ListStart(fv.b) + fv.offset = flatbuf.ListEnd(fv.b) + + case *arrow.LargeListType: + fv.dtype = flatbuf.TypeLargeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.LargeListStart(fv.b) + fv.offset = flatbuf.LargeListEnd(fv.b) + + case *arrow.FixedSizeListType: + fv.dtype = flatbuf.TypeFixedSizeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.FixedSizeListStart(fv.b) + flatbuf.FixedSizeListAddListSize(fv.b, dt.Len()) + fv.offset = flatbuf.FixedSizeListEnd(fv.b) + + case *arrow.MonthIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitYEAR_MONTH) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DayTimeIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitDAY_TIME) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.MonthDayNanoIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitMONTH_DAY_NANO) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DurationType: + fv.dtype = flatbuf.TypeDuration + unit := unitToFB(dt.Unit) + flatbuf.DurationStart(fv.b) + flatbuf.DurationAddUnit(fv.b, unit) + fv.offset = flatbuf.DurationEnd(fv.b) + + case *arrow.MapType: + fv.dtype = flatbuf.TypeMap + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ValueField(), fv.memo)) + flatbuf.MapStart(fv.b) + flatbuf.MapAddKeysSorted(fv.b, dt.KeysSorted) + fv.offset = flatbuf.MapEnd(fv.b) + + case *arrow.RunEndEncodedType: + fv.dtype = flatbuf.TypeRunEndEncoded + var offsets [2]flatbuffers.UOffsetT + offsets[0] = fieldToFB(fv.b, fv.pos.Child(0), + arrow.Field{Name: "run_ends", Type: dt.RunEnds()}, fv.memo) + offsets[1] = fieldToFB(fv.b, fv.pos.Child(1), + arrow.Field{Name: "values", Type: dt.Encoded(), Nullable: true}, fv.memo) + flatbuf.RunEndEncodedStart(fv.b) + fv.b.PrependUOffsetT(offsets[1]) + fv.b.PrependUOffsetT(offsets[0]) + fv.offset = flatbuf.RunEndEncodedEnd(fv.b) + fv.kids = append(fv.kids, offsets[0], offsets[1]) + + case arrow.ExtensionType: + field.Type = dt.StorageType() + fv.visit(field) + fv.meta[ExtensionTypeKeyName] = dt.ExtensionName() + fv.meta[ExtensionMetadataKeyName] = string(dt.Serialize()) + + case *arrow.DictionaryType: + field.Type = dt.ValueType + fv.visit(field) + + case arrow.UnionType: + fv.dtype = flatbuf.TypeUnion + offsets := make([]flatbuffers.UOffsetT, len(dt.Fields())) + for i, field := range dt.Fields() { + offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) + } + + codes := dt.TypeCodes() + flatbuf.UnionStartTypeIdsVector(fv.b, len(codes)) + + for i := len(codes) - 1; i >= 0; i-- { + fv.b.PlaceInt32(int32(codes[i])) + } + fbTypeIDs := fv.b.EndVector(len(dt.TypeCodes())) + flatbuf.UnionStart(fv.b) + switch dt.Mode() { + case arrow.SparseMode: + flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeSparse) + case arrow.DenseMode: + flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeDense) + default: + panic("invalid union mode") + } + flatbuf.UnionAddTypeIds(fv.b, fbTypeIDs) + fv.offset = flatbuf.UnionEnd(fv.b) + fv.kids = append(fv.kids, offsets...) + + default: + err := fmt.Errorf("arrow/ipc: invalid data type %v", dt) + panic(err) // FIXME(sbinet): implement all data-types. 
+ } +} + +func (fv *fieldVisitor) result(field arrow.Field) flatbuffers.UOffsetT { + nameFB := fv.b.CreateString(field.Name) + + fv.visit(field) + + flatbuf.FieldStartChildrenVector(fv.b, len(fv.kids)) + for i := len(fv.kids) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(fv.kids[i]) + } + kidsFB := fv.b.EndVector(len(fv.kids)) + + storageType := field.Type + if storageType.ID() == arrow.EXTENSION { + storageType = storageType.(arrow.ExtensionType).StorageType() + } + + var dictFB flatbuffers.UOffsetT + if storageType.ID() == arrow.DICTIONARY { + idxType := field.Type.(*arrow.DictionaryType).IndexType.(arrow.FixedWidthDataType) + + dictID, err := fv.memo.GetFieldID(fv.pos.Path()) + if err != nil { + panic(err) + } + var signed bool + switch idxType.ID() { + case arrow.UINT8, arrow.UINT16, arrow.UINT32, arrow.UINT64: + signed = false + case arrow.INT8, arrow.INT16, arrow.INT32, arrow.INT64: + signed = true + } + indexTypeOffset := intToFB(fv.b, int32(idxType.BitWidth()), signed) + flatbuf.DictionaryEncodingStart(fv.b) + flatbuf.DictionaryEncodingAddId(fv.b, dictID) + flatbuf.DictionaryEncodingAddIndexType(fv.b, indexTypeOffset) + flatbuf.DictionaryEncodingAddIsOrdered(fv.b, field.Type.(*arrow.DictionaryType).Ordered) + dictFB = flatbuf.DictionaryEncodingEnd(fv.b) + } + + var ( + metaFB flatbuffers.UOffsetT + kvs []flatbuffers.UOffsetT + ) + for i, k := range field.Metadata.Keys() { + v := field.Metadata.Values()[i] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + { + keys := make([]string, 0, len(fv.meta)) + for k := range fv.meta { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := fv.meta[k] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + } + if len(kvs) > 0 { + flatbuf.FieldStartCustomMetadataVector(fv.b, len(kvs)) + for i := len(kvs) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(kvs[i]) + } + metaFB = fv.b.EndVector(len(kvs)) + } + + flatbuf.FieldStart(fv.b) + flatbuf.FieldAddName(fv.b, nameFB) + flatbuf.FieldAddNullable(fv.b, field.Nullable) + flatbuf.FieldAddTypeType(fv.b, fv.dtype) + flatbuf.FieldAddType(fv.b, fv.offset) + flatbuf.FieldAddDictionary(fv.b, dictFB) + flatbuf.FieldAddChildren(fv.b, kidsFB) + flatbuf.FieldAddCustomMetadata(fv.b, metaFB) + + offset := flatbuf.FieldEnd(fv.b) + + return offset +} + +func typeFromFB(field *flatbuf.Field, pos dictutils.FieldPos, children []arrow.Field, md *arrow.Metadata, memo *dictutils.Memo) (arrow.DataType, error) { + var data flatbuffers.Table + if !field.Type(&data) { + return nil, fmt.Errorf("arrow/ipc: could not load field type data") + } + + dt, err := concreteTypeFromFB(field.TypeType(), data, children) + if err != nil { + return dt, err + } + + var ( + dictID = int64(-1) + dictValueType arrow.DataType + encoding = field.Dictionary(nil) + ) + if encoding != nil { + var idt flatbuf.Int + encoding.IndexType(&idt) + idxType, err := intFromFB(idt) + if err != nil { + return nil, err + } + + dictValueType = dt + dt = &arrow.DictionaryType{IndexType: idxType, ValueType: dictValueType, Ordered: encoding.IsOrdered()} + dictID = encoding.Id() + + if err = memo.Mapper.AddField(dictID, pos.Path()); err != nil { + return dt, err + } + if err = memo.AddType(dictID, dictValueType); err != nil { + 
return dt, err + } + + } + + // look for extension metadata in custom metadata field. + if md.Len() > 0 { + i := md.FindKey(ExtensionTypeKeyName) + if i < 0 { + return dt, err + } + + extType := arrow.GetExtensionType(md.Values()[i]) + if extType == nil { + // if the extension type is unknown, we do not error here. + // simply return the storage type. + return dt, err + } + + var ( + data string + dataIdx int + ) + + if dataIdx = md.FindKey(ExtensionMetadataKeyName); dataIdx >= 0 { + data = md.Values()[dataIdx] + } + + dt, err = extType.Deserialize(dt, data) + if err != nil { + return dt, err + } + + mdkeys := md.Keys() + mdvals := md.Values() + if dataIdx < 0 { + // if there was no extension metadata, just the name, we only have to + // remove the extension name metadata key/value to ensure roundtrip + // metadata consistency + *md = arrow.NewMetadata(append(mdkeys[:i], mdkeys[i+1:]...), append(mdvals[:i], mdvals[i+1:]...)) + } else { + // if there was extension metadata, we need to remove both the type name + // and the extension metadata keys and values. + newkeys := make([]string, 0, md.Len()-2) + newvals := make([]string, 0, md.Len()-2) + for j := range mdkeys { + if j != i && j != dataIdx { // copy everything except the extension metadata keys/values + newkeys = append(newkeys, mdkeys[j]) + newvals = append(newvals, mdvals[j]) + } + } + *md = arrow.NewMetadata(newkeys, newvals) + } + } + + return dt, err +} + +func concreteTypeFromFB(typ flatbuf.Type, data flatbuffers.Table, children []arrow.Field) (arrow.DataType, error) { + switch typ { + case flatbuf.TypeNONE: + return nil, fmt.Errorf("arrow/ipc: Type metadata cannot be none") + + case flatbuf.TypeNull: + return arrow.Null, nil + + case flatbuf.TypeInt: + var dt flatbuf.Int + dt.Init(data.Bytes, data.Pos) + return intFromFB(dt) + + case flatbuf.TypeFloatingPoint: + var dt flatbuf.FloatingPoint + dt.Init(data.Bytes, data.Pos) + return floatFromFB(dt) + + case flatbuf.TypeDecimal: + var dt flatbuf.Decimal + dt.Init(data.Bytes, data.Pos) + return decimalFromFB(dt) + + case flatbuf.TypeBinary: + return arrow.BinaryTypes.Binary, nil + + case flatbuf.TypeFixedSizeBinary: + var dt flatbuf.FixedSizeBinary + dt.Init(data.Bytes, data.Pos) + return &arrow.FixedSizeBinaryType{ByteWidth: int(dt.ByteWidth())}, nil + + case flatbuf.TypeUtf8: + return arrow.BinaryTypes.String, nil + + case flatbuf.TypeLargeBinary: + return arrow.BinaryTypes.LargeBinary, nil + + case flatbuf.TypeLargeUtf8: + return arrow.BinaryTypes.LargeString, nil + + case flatbuf.TypeBool: + return arrow.FixedWidthTypes.Boolean, nil + + case flatbuf.TypeList: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: List must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.ListOfField(children[0]) + return dt, nil + + case flatbuf.TypeLargeList: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: LargeList must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.LargeListOfField(children[0]) + return dt, nil + + case flatbuf.TypeFixedSizeList: + var dt flatbuf.FixedSizeList + dt.Init(data.Bytes, data.Pos) + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: FixedSizeList must have exactly 1 child field (got=%d)", len(children)) + } + ret := arrow.FixedSizeListOfField(dt.ListSize(), children[0]) + return ret, nil + + case flatbuf.TypeStruct_: + return arrow.StructOf(children...), nil + + case flatbuf.TypeUnion: + var dt flatbuf.Union + dt.Init(data.Bytes, data.Pos) + var ( + mode arrow.UnionMode + typeIDs 
[]arrow.UnionTypeCode + ) + + switch dt.Mode() { + case flatbuf.UnionModeSparse: + mode = arrow.SparseMode + case flatbuf.UnionModeDense: + mode = arrow.DenseMode + } + + typeIDLen := dt.TypeIdsLength() + + if typeIDLen == 0 { + for i := range children { + typeIDs = append(typeIDs, int8(i)) + } + } else { + for i := 0; i < typeIDLen; i++ { + id := dt.TypeIds(i) + code := arrow.UnionTypeCode(id) + if int32(code) != id { + return nil, errors.New("union type id out of bounds") + } + typeIDs = append(typeIDs, code) + } + } + + return arrow.UnionOf(mode, children, typeIDs), nil + + case flatbuf.TypeTime: + var dt flatbuf.Time + dt.Init(data.Bytes, data.Pos) + return timeFromFB(dt) + + case flatbuf.TypeTimestamp: + var dt flatbuf.Timestamp + dt.Init(data.Bytes, data.Pos) + return timestampFromFB(dt) + + case flatbuf.TypeDate: + var dt flatbuf.Date + dt.Init(data.Bytes, data.Pos) + return dateFromFB(dt) + + case flatbuf.TypeInterval: + var dt flatbuf.Interval + dt.Init(data.Bytes, data.Pos) + return intervalFromFB(dt) + + case flatbuf.TypeDuration: + var dt flatbuf.Duration + dt.Init(data.Bytes, data.Pos) + return durationFromFB(dt) + + case flatbuf.TypeMap: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: Map must have exactly 1 child field") + } + + if children[0].Nullable || children[0].Type.ID() != arrow.STRUCT || len(children[0].Type.(*arrow.StructType).Fields()) != 2 { + return nil, fmt.Errorf("arrow/ipc: Map's key-item pairs must be non-nullable structs") + } + + pairType := children[0].Type.(*arrow.StructType) + if pairType.Field(0).Nullable { + return nil, fmt.Errorf("arrow/ipc: Map's keys must be non-nullable") + } + + var dt flatbuf.Map + dt.Init(data.Bytes, data.Pos) + ret := arrow.MapOf(pairType.Field(0).Type, pairType.Field(1).Type) + ret.SetItemNullable(pairType.Field(1).Nullable) + ret.KeysSorted = dt.KeysSorted() + return ret, nil + + case flatbuf.TypeRunEndEncoded: + if len(children) != 2 { + return nil, fmt.Errorf("%w: arrow/ipc: RunEndEncoded must have exactly 2 child fields", arrow.ErrInvalid) + } + switch children[0].Type.ID() { + case arrow.INT16, arrow.INT32, arrow.INT64: + default: + return nil, fmt.Errorf("%w: arrow/ipc: run-end encoded run_ends field must be one of int16, int32, or int64 type", arrow.ErrInvalid) + } + return arrow.RunEndEncodedOf(children[0].Type, children[1].Type), nil + + default: + panic(fmt.Errorf("arrow/ipc: type %v not implemented", flatbuf.EnumNamesType[typ])) + } +} + +func intFromFB(data flatbuf.Int) (arrow.DataType, error) { + bw := data.BitWidth() + if bw > 64 { + return nil, fmt.Errorf("arrow/ipc: integers with more than 64 bits not implemented (bits=%d)", bw) + } + if bw < 8 { + return nil, fmt.Errorf("arrow/ipc: integers with less than 8 bits not implemented (bits=%d)", bw) + } + + switch bw { + case 8: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint8, nil + } + return arrow.PrimitiveTypes.Int8, nil + + case 16: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint16, nil + } + return arrow.PrimitiveTypes.Int16, nil + + case 32: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint32, nil + } + return arrow.PrimitiveTypes.Int32, nil + + case 64: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint64, nil + } + return arrow.PrimitiveTypes.Int64, nil + default: + return nil, fmt.Errorf("arrow/ipc: integers not in cstdint are not implemented") + } +} + +func intToFB(b *flatbuffers.Builder, bw int32, isSigned bool) flatbuffers.UOffsetT { + flatbuf.IntStart(b) + flatbuf.IntAddBitWidth(b, bw) + 
flatbuf.IntAddIsSigned(b, isSigned) + return flatbuf.IntEnd(b) +} + +func floatFromFB(data flatbuf.FloatingPoint) (arrow.DataType, error) { + switch p := data.Precision(); p { + case flatbuf.PrecisionHALF: + return arrow.FixedWidthTypes.Float16, nil + case flatbuf.PrecisionSINGLE: + return arrow.PrimitiveTypes.Float32, nil + case flatbuf.PrecisionDOUBLE: + return arrow.PrimitiveTypes.Float64, nil + default: + return nil, fmt.Errorf("arrow/ipc: floating point type with %d precision not implemented", p) + } +} + +func floatToFB(b *flatbuffers.Builder, bw int32) flatbuffers.UOffsetT { + switch bw { + case 16: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionHALF) + return flatbuf.FloatingPointEnd(b) + case 32: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionSINGLE) + return flatbuf.FloatingPointEnd(b) + case 64: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionDOUBLE) + return flatbuf.FloatingPointEnd(b) + default: + panic(fmt.Errorf("arrow/ipc: invalid floating point precision %d-bits", bw)) + } +} + +func decimalFromFB(data flatbuf.Decimal) (arrow.DataType, error) { + switch data.BitWidth() { + case 128: + return &arrow.Decimal128Type{Precision: data.Precision(), Scale: data.Scale()}, nil + case 256: + return &arrow.Decimal256Type{Precision: data.Precision(), Scale: data.Scale()}, nil + default: + return nil, fmt.Errorf("arrow/ipc: invalid decimal bitwidth: %d", data.BitWidth()) + } +} + +func timeFromFB(data flatbuf.Time) (arrow.DataType, error) { + bw := data.BitWidth() + unit := unitFromFB(data.Unit()) + + switch bw { + case 32: + switch unit { + case arrow.Millisecond: + return arrow.FixedWidthTypes.Time32ms, nil + case arrow.Second: + return arrow.FixedWidthTypes.Time32s, nil + default: + return nil, fmt.Errorf("arrow/ipc: Time32 type with %v unit not implemented", unit) + } + case 64: + switch unit { + case arrow.Nanosecond: + return arrow.FixedWidthTypes.Time64ns, nil + case arrow.Microsecond: + return arrow.FixedWidthTypes.Time64us, nil + default: + return nil, fmt.Errorf("arrow/ipc: Time64 type with %v unit not implemented", unit) + } + default: + return nil, fmt.Errorf("arrow/ipc: Time type with %d bitwidth not implemented", bw) + } +} + +func timestampFromFB(data flatbuf.Timestamp) (arrow.DataType, error) { + unit := unitFromFB(data.Unit()) + tz := string(data.Timezone()) + return &arrow.TimestampType{Unit: unit, TimeZone: tz}, nil +} + +func dateFromFB(data flatbuf.Date) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.DateUnitDAY: + return arrow.FixedWidthTypes.Date32, nil + case flatbuf.DateUnitMILLISECOND: + return arrow.FixedWidthTypes.Date64, nil + } + return nil, fmt.Errorf("arrow/ipc: Date type with %d unit not implemented", data.Unit()) +} + +func intervalFromFB(data flatbuf.Interval) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.IntervalUnitYEAR_MONTH: + return arrow.FixedWidthTypes.MonthInterval, nil + case flatbuf.IntervalUnitDAY_TIME: + return arrow.FixedWidthTypes.DayTimeInterval, nil + case flatbuf.IntervalUnitMONTH_DAY_NANO: + return arrow.FixedWidthTypes.MonthDayNanoInterval, nil + } + return nil, fmt.Errorf("arrow/ipc: Interval type with %d unit not implemented", data.Unit()) +} + +func durationFromFB(data flatbuf.Duration) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.TimeUnitSECOND: + return arrow.FixedWidthTypes.Duration_s, nil + case flatbuf.TimeUnitMILLISECOND: + return 
arrow.FixedWidthTypes.Duration_ms, nil + case flatbuf.TimeUnitMICROSECOND: + return arrow.FixedWidthTypes.Duration_us, nil + case flatbuf.TimeUnitNANOSECOND: + return arrow.FixedWidthTypes.Duration_ns, nil + } + return nil, fmt.Errorf("arrow/ipc: Duration type with %d unit not implemented", data.Unit()) +} + +type customMetadataer interface { + CustomMetadataLength() int + CustomMetadata(*flatbuf.KeyValue, int) bool +} + +func metadataFromFB(md customMetadataer) (arrow.Metadata, error) { + var ( + keys = make([]string, md.CustomMetadataLength()) + vals = make([]string, md.CustomMetadataLength()) + ) + + for i := range keys { + var kv flatbuf.KeyValue + if !md.CustomMetadata(&kv, i) { + return arrow.Metadata{}, fmt.Errorf("arrow/ipc: could not read key-value %d from flatbuffer", i) + } + keys[i] = string(kv.Key()) + vals[i] = string(kv.Value()) + } + + return arrow.NewMetadata(keys, vals), nil +} + +func metadataToFB(b *flatbuffers.Builder, meta arrow.Metadata, start startVecFunc) flatbuffers.UOffsetT { + if meta.Len() == 0 { + return 0 + } + + n := meta.Len() + kvs := make([]flatbuffers.UOffsetT, n) + for i := range kvs { + k := b.CreateString(meta.Keys()[i]) + v := b.CreateString(meta.Values()[i]) + flatbuf.KeyValueStart(b) + flatbuf.KeyValueAddKey(b, k) + flatbuf.KeyValueAddValue(b, v) + kvs[i] = flatbuf.KeyValueEnd(b) + } + + start(b, n) + for i := n - 1; i >= 0; i-- { + b.PrependUOffsetT(kvs[i]) + } + return b.EndVector(n) +} + +func schemaFromFB(schema *flatbuf.Schema, memo *dictutils.Memo) (*arrow.Schema, error) { + var ( + err error + fields = make([]arrow.Field, schema.FieldsLength()) + pos = dictutils.NewFieldPos() + ) + + for i := range fields { + var field flatbuf.Field + if !schema.Fields(&field, i) { + return nil, fmt.Errorf("arrow/ipc: could not read field %d from schema", i) + } + + fields[i], err = fieldFromFB(&field, pos.Child(int32(i)), memo) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not convert field %d from flatbuf: %w", i, err) + } + } + + md, err := metadataFromFB(schema) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not convert schema metadata from flatbuf: %w", err) + } + + return arrow.NewSchemaWithEndian(fields, &md, endian.Endianness(schema.Endianness())), nil +} + +func schemaToFB(b *flatbuffers.Builder, schema *arrow.Schema, memo *dictutils.Mapper) flatbuffers.UOffsetT { + fields := make([]flatbuffers.UOffsetT, len(schema.Fields())) + pos := dictutils.NewFieldPos() + for i, field := range schema.Fields() { + fields[i] = fieldToFB(b, pos.Child(int32(i)), field, memo) + } + + flatbuf.SchemaStartFieldsVector(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + b.PrependUOffsetT(fields[i]) + } + fieldsFB := b.EndVector(len(fields)) + + metaFB := metadataToFB(b, schema.Metadata(), flatbuf.SchemaStartCustomMetadataVector) + + flatbuf.SchemaStart(b) + flatbuf.SchemaAddEndianness(b, flatbuf.Endianness(schema.Endianness())) + flatbuf.SchemaAddFields(b, fieldsFB) + flatbuf.SchemaAddCustomMetadata(b, metaFB) + offset := flatbuf.SchemaEnd(b) + + return offset +} + +// payloadFromSchema returns a slice of payloads corresponding to the given schema. +// Callers of payloadFromSchema will need to call Release after use. 
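+//
+// For example (an illustrative sketch of the intended call pattern, mirroring
+// how Writer.start consumes these payloads; pw stands for any PayloadWriter):
+//
+//	ps := payloadFromSchema(schema, mem, memo)
+//	defer ps.Release()
+//	for _, p := range ps {
+//		if err := pw.WritePayload(p); err != nil {
+//			return err
+//		}
+//	}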
+func payloadFromSchema(schema *arrow.Schema, mem memory.Allocator, memo *dictutils.Mapper) payloads { + ps := make(payloads, 1) + ps[0].msg = MessageSchema + ps[0].meta = writeSchemaMessage(schema, mem, memo) + + return ps +} + +func writeFBBuilder(b *flatbuffers.Builder, mem memory.Allocator) *memory.Buffer { + raw := b.FinishedBytes() + buf := memory.NewResizableBuffer(mem) + buf.Resize(len(raw)) + copy(buf.Bytes(), raw) + return buf +} + +func writeMessageFB(b *flatbuffers.Builder, mem memory.Allocator, hdrType flatbuf.MessageHeader, hdr flatbuffers.UOffsetT, bodyLen int64) *memory.Buffer { + + flatbuf.MessageStart(b) + flatbuf.MessageAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion)) + flatbuf.MessageAddHeaderType(b, hdrType) + flatbuf.MessageAddHeader(b, hdr) + flatbuf.MessageAddBodyLength(b, bodyLen) + msg := flatbuf.MessageEnd(b) + b.Finish(msg) + + return writeFBBuilder(b, mem) +} + +func writeSchemaMessage(schema *arrow.Schema, mem memory.Allocator, dict *dictutils.Mapper) *memory.Buffer { + b := flatbuffers.NewBuilder(1024) + schemaFB := schemaToFB(b, schema, dict) + return writeMessageFB(b, mem, flatbuf.MessageHeaderSchema, schemaFB, 0) +} + +func writeFileFooter(schema *arrow.Schema, dicts, recs []fileBlock, w io.Writer) error { + var ( + b = flatbuffers.NewBuilder(1024) + memo dictutils.Mapper + ) + memo.ImportSchema(schema) + + schemaFB := schemaToFB(b, schema, &memo) + dictsFB := fileBlocksToFB(b, dicts, flatbuf.FooterStartDictionariesVector) + recsFB := fileBlocksToFB(b, recs, flatbuf.FooterStartRecordBatchesVector) + + flatbuf.FooterStart(b) + flatbuf.FooterAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion)) + flatbuf.FooterAddSchema(b, schemaFB) + flatbuf.FooterAddDictionaries(b, dictsFB) + flatbuf.FooterAddRecordBatches(b, recsFB) + footer := flatbuf.FooterEnd(b) + + b.Finish(footer) + + _, err := w.Write(b.FinishedBytes()) + return err +} + +func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer { + b := flatbuffers.NewBuilder(0) + recFB := recordToFB(b, size, bodyLength, fields, meta, codec) + return writeMessageFB(b, mem, flatbuf.MessageHeaderRecordBatch, recFB, bodyLength) +} + +func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer { + b := flatbuffers.NewBuilder(0) + recFB := recordToFB(b, size, bodyLength, fields, meta, codec) + + flatbuf.DictionaryBatchStart(b) + flatbuf.DictionaryBatchAddId(b, id) + flatbuf.DictionaryBatchAddData(b, recFB) + flatbuf.DictionaryBatchAddIsDelta(b, isDelta) + dictFB := flatbuf.DictionaryBatchEnd(b) + return writeMessageFB(b, mem, flatbuf.MessageHeaderDictionaryBatch, dictFB, bodyLength) +} + +func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) flatbuffers.UOffsetT { + fieldsFB := writeFieldNodes(b, fields, flatbuf.RecordBatchStartNodesVector) + metaFB := writeBuffers(b, meta, flatbuf.RecordBatchStartBuffersVector) + var bodyCompressFB flatbuffers.UOffsetT + if codec != -1 { + bodyCompressFB = writeBodyCompression(b, codec) + } + + flatbuf.RecordBatchStart(b) + flatbuf.RecordBatchAddLength(b, size) + flatbuf.RecordBatchAddNodes(b, fieldsFB) + flatbuf.RecordBatchAddBuffers(b, metaFB) + if codec != -1 { + flatbuf.RecordBatchAddCompression(b, bodyCompressFB) + } + + return 
flatbuf.RecordBatchEnd(b) +} + +func writeFieldNodes(b *flatbuffers.Builder, fields []fieldMetadata, start startVecFunc) flatbuffers.UOffsetT { + + start(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + field := fields[i] + if field.Offset != 0 { + panic(fmt.Errorf("arrow/ipc: field metadata for IPC must have offset 0")) + } + flatbuf.CreateFieldNode(b, field.Len, field.Nulls) + } + + return b.EndVector(len(fields)) +} + +func writeBuffers(b *flatbuffers.Builder, buffers []bufferMetadata, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(buffers)) + for i := len(buffers) - 1; i >= 0; i-- { + buffer := buffers[i] + flatbuf.CreateBuffer(b, buffer.Offset, buffer.Len) + } + return b.EndVector(len(buffers)) +} + +func writeBodyCompression(b *flatbuffers.Builder, codec flatbuf.CompressionType) flatbuffers.UOffsetT { + flatbuf.BodyCompressionStart(b) + flatbuf.BodyCompressionAddCodec(b, codec) + flatbuf.BodyCompressionAddMethod(b, flatbuf.BodyCompressionMethodBUFFER) + return flatbuf.BodyCompressionEnd(b) +} + +func writeMessage(msg *memory.Buffer, alignment int32, w io.Writer) (int, error) { + var ( + n int + err error + ) + + // ARROW-3212: we do not make any assumption on whether the output stream is aligned or not. + paddedMsgLen := int32(msg.Len()) + 8 + remainder := paddedMsgLen % alignment + if remainder != 0 { + paddedMsgLen += alignment - remainder + } + + tmp := make([]byte, 4) + + // write continuation indicator, to address 8-byte alignment requirement from FlatBuffers. + binary.LittleEndian.PutUint32(tmp, kIPCContToken) + _, err = w.Write(tmp) + if err != nil { + return 0, fmt.Errorf("arrow/ipc: could not write continuation bit indicator: %w", err) + } + + // the returned message size includes the length prefix, the flatbuffer, + padding + n = int(paddedMsgLen) + + // write the flatbuffer size prefix, including padding + sizeFB := paddedMsgLen - 8 + binary.LittleEndian.PutUint32(tmp, uint32(sizeFB)) + _, err = w.Write(tmp) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer size prefix: %w", err) + } + + // write the flatbuffer + _, err = w.Write(msg.Bytes()) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer: %w", err) + } + + // write any padding + padding := paddedMsgLen - int32(msg.Len()) - 8 + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message padding bytes: %w", err) + } + } + + return n, err +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go new file mode 100644 index 00000000..9c502f6a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go @@ -0,0 +1,284 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "bytes" + "errors" + "fmt" + "io" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/internal/dictutils" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +// Reader reads records from an io.Reader. +// Reader expects a schema (plus any dictionaries) as the first messages +// in the stream, followed by records. +type Reader struct { + r MessageReader + schema *arrow.Schema + + refCount int64 + rec arrow.Record + err error + + // types dictTypeMap + memo dictutils.Memo + readInitialDicts bool + done bool + swapEndianness bool + ensureNativeEndian bool + expectedSchema *arrow.Schema + + mem memory.Allocator +} + +// NewReaderFromMessageReader allows constructing a new reader object with the +// provided MessageReader allowing injection of reading messages other than +// by simple streaming bytes such as Arrow Flight which receives a protobuf message +func NewReaderFromMessageReader(r MessageReader, opts ...Option) (reader *Reader, err error) { + defer func() { + if pErr := recover(); pErr != nil { + err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr) + } + }() + cfg := newConfig() + for _, opt := range opts { + opt(cfg) + } + + rr := &Reader{ + r: r, + refCount: 1, + // types: make(dictTypeMap), + memo: dictutils.NewMemo(), + mem: cfg.alloc, + ensureNativeEndian: cfg.ensureNativeEndian, + expectedSchema: cfg.schema, + } + + if !cfg.noAutoSchema { + if err := rr.readSchema(cfg.schema); err != nil { + return nil, err + } + } + + return rr, nil +} + +// NewReader returns a reader that reads records from an input stream. +func NewReader(r io.Reader, opts ...Option) (*Reader, error) { + return NewReaderFromMessageReader(NewMessageReader(r, opts...), opts...) +} + +// Err returns the last error encountered during the iteration over the +// underlying stream. +func (r *Reader) Err() error { return r.err } + +func (r *Reader) Schema() *arrow.Schema { + if r.schema == nil { + if err := r.readSchema(r.expectedSchema); err != nil { + r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err) + r.done = true + } + } + return r.schema +} + +func (r *Reader) readSchema(schema *arrow.Schema) error { + msg, err := r.r.Message() + if err != nil { + return fmt.Errorf("arrow/ipc: could not read message schema: %w", err) + } + + if msg.Type() != MessageSchema { + return fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", msg.Type(), MessageSchema) + } + + // FIXME(sbinet) refactor msg-header handling. + var schemaFB flatbuf.Schema + initFB(&schemaFB, msg.msg.Header) + + r.schema, err = schemaFromFB(&schemaFB, &r.memo) + if err != nil { + return fmt.Errorf("arrow/ipc: could not decode schema from message schema: %w", err) + } + + // check the provided schema match the one read from stream. + if schema != nil && !schema.Equal(r.schema) { + return errInconsistentSchema + } + + if r.ensureNativeEndian && !r.schema.IsNativeEndian() { + r.swapEndianness = true + r.schema = r.schema.WithEndianness(endian.NativeEndian) + } + + return nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. 
+func (r *Reader) Retain() { + atomic.AddInt64(&r.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *Reader) Release() { + debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases") + + if atomic.AddInt64(&r.refCount, -1) == 0 { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + if r.r != nil { + r.r.Release() + r.r = nil + } + } +} + +// Next returns whether a Record could be extracted from the underlying stream. +func (r *Reader) Next() bool { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if r.err != nil || r.done { + return false + } + + return r.next() +} + +func (r *Reader) getInitialDicts() bool { + var msg *Message + // we have to get all dictionaries before reconstructing the first + // record. subsequent deltas and replacements modify the memo + numDicts := r.memo.Mapper.NumDicts() + // there should be numDicts dictionary messages + for i := 0; i < numDicts; i++ { + msg, r.err = r.r.Message() + if r.err != nil { + r.done = true + if r.err == io.EOF { + if i == 0 { + r.err = nil + } else { + r.err = fmt.Errorf("arrow/ipc: IPC stream ended without reading the expected (%d) dictionaries", numDicts) + } + } + return false + } + + if msg.Type() != MessageDictionaryBatch { + r.err = fmt.Errorf("arrow/ipc: IPC stream did not have the expected (%d) dictionaries at the start of the stream", numDicts) + } + if _, err := readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); err != nil { + r.done = true + r.err = err + return false + } + } + r.readInitialDicts = true + return true +} + +func (r *Reader) next() bool { + defer func() { + if pErr := recover(); pErr != nil { + r.err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr) + } + }() + if r.schema == nil { + if err := r.readSchema(r.expectedSchema); err != nil { + r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err) + r.done = true + return false + } + } + + if !r.readInitialDicts && !r.getInitialDicts() { + return false + } + + var msg *Message + msg, r.err = r.r.Message() + + for msg != nil && msg.Type() == MessageDictionaryBatch { + if _, r.err = readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); r.err != nil { + r.done = true + return false + } + msg, r.err = r.r.Message() + } + if r.err != nil { + r.done = true + if errors.Is(r.err, io.EOF) { + r.err = nil + } + return false + } + + if got, want := msg.Type(), MessageRecordBatch; got != want { + r.err = fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v", got, want) + return false + } + + r.rec = newRecord(r.schema, &r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem) + return true +} + +// Record returns the current record that has been extracted from the +// underlying stream. +// It is valid until the next call to Next. +func (r *Reader) Record() arrow.Record { + return r.rec +} + +// Read reads the current record from the underlying stream and an error, if any. +// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). 
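+//
+// For example, a minimal io.EOF-terminated read loop (an illustrative sketch;
+// rdr is a *Reader obtained from NewReader, and process is a placeholder):
+//
+//	for {
+//		rec, err := rdr.Read()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		process(rec) // rec is only valid until the next call to Read
+//	}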
+func (r *Reader) Read() (arrow.Record, error) { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if !r.next() { + if r.done && r.err == nil { + return nil, io.EOF + } + return nil, r.err + } + + return r.rec, nil +} + +var ( + _ array.RecordReader = (*Reader)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go new file mode 100644 index 00000000..93c6d8df --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go @@ -0,0 +1,905 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sync" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/arrow/internal" + "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v12/arrow/internal/dictutils" + "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v12/arrow/memory" +) + +type swriter struct { + w io.Writer + pos int64 +} + +func (w *swriter) Start() error { return nil } +func (w *swriter) Close() error { + _, err := w.Write(kEOS[:]) + return err +} + +func (w *swriter) WritePayload(p Payload) error { + _, err := writeIPCPayload(w, p) + if err != nil { + return err + } + return nil +} + +func (w *swriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.pos += int64(n) + return n, err +} + +func hasNestedDict(data arrow.ArrayData) bool { + if data.DataType().ID() == arrow.DICTIONARY { + return true + } + for _, c := range data.Children() { + if hasNestedDict(c) { + return true + } + } + return false +} + +// Writer is an Arrow stream writer. +type Writer struct { + w io.Writer + + mem memory.Allocator + pw PayloadWriter + + started bool + schema *arrow.Schema + mapper dictutils.Mapper + codec flatbuf.CompressionType + compressNP int + minSpaceSavings *float64 + + // map of the last written dictionaries by id + // so we can avoid writing the same dictionary over and over + lastWrittenDicts map[int64]arrow.Array + emitDictDeltas bool +} + +// NewWriterWithPayloadWriter constructs a writer with the provided payload writer +// instead of the default stream payload writer. This makes the writer more +// reusable such as by the Arrow Flight writer. +func NewWriterWithPayloadWriter(pw PayloadWriter, opts ...Option) *Writer { + cfg := newConfig(opts...) 
+ return &Writer{ + mem: cfg.alloc, + pw: pw, + schema: cfg.schema, + codec: cfg.codec, + compressNP: cfg.compressNP, + minSpaceSavings: cfg.minSpaceSavings, + emitDictDeltas: cfg.emitDictDeltas, + } +} + +// NewWriter returns a writer that writes records to the provided output stream. +func NewWriter(w io.Writer, opts ...Option) *Writer { + cfg := newConfig(opts...) + return &Writer{ + w: w, + mem: cfg.alloc, + pw: &swriter{w: w}, + schema: cfg.schema, + codec: cfg.codec, + emitDictDeltas: cfg.emitDictDeltas, + } +} + +func (w *Writer) Close() error { + if !w.started { + err := w.start() + if err != nil { + return err + } + } + + if w.pw == nil { + return nil + } + + err := w.pw.Close() + if err != nil { + return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + w.pw = nil + + for _, d := range w.lastWrittenDicts { + d.Release() + } + + return nil +} + +func (w *Writer) Write(rec arrow.Record) (err error) { + defer func() { + if pErr := recover(); pErr != nil { + err = fmt.Errorf("arrow/ipc: unknown error while writing: %v", pErr) + } + }() + + if !w.started { + err := w.start() + if err != nil { + return err + } + } + + schema := rec.Schema() + if schema == nil || !schema.Equal(w.schema) { + return errInconsistentSchema + } + + const allow64b = true + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder(w.mem, 0, kMaxNestingDepth, allow64b, w.codec, w.compressNP, w.minSpaceSavings) + ) + defer data.Release() + + err = writeDictionaryPayloads(w.mem, rec, false, w.emitDictDeltas, &w.mapper, w.lastWrittenDicts, w.pw, enc) + if err != nil { + return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err) + } + + enc.reset() + if err := enc.Encode(&data, rec); err != nil { + return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return w.pw.WritePayload(data) +} + +func writeDictionaryPayloads(mem memory.Allocator, batch arrow.Record, isFileFormat bool, emitDictDeltas bool, mapper *dictutils.Mapper, lastWrittenDicts map[int64]arrow.Array, pw PayloadWriter, encoder *recordEncoder) error { + dictionaries, err := dictutils.CollectDictionaries(batch, mapper) + if err != nil { + return err + } + defer func() { + for _, d := range dictionaries { + d.Dict.Release() + } + }() + + eqopt := array.WithNaNsEqual(true) + for _, pair := range dictionaries { + encoder.reset() + var ( + deltaStart int64 + enc = dictEncoder{encoder} + ) + lastDict, exists := lastWrittenDicts[pair.ID] + if exists { + if lastDict.Data() == pair.Dict.Data() { + continue + } + newLen, lastLen := pair.Dict.Len(), lastDict.Len() + if lastLen == newLen && array.ApproxEqual(lastDict, pair.Dict, eqopt) { + // same dictionary by value + // might cost CPU, but required for IPC file format + continue + } + if isFileFormat { + return errors.New("arrow/ipc: Dictionary replacement detected when writing IPC file format. 
Arrow IPC File only supports single dictionary per field") + } + + if newLen > lastLen && + emitDictDeltas && + !hasNestedDict(pair.Dict.Data()) && + (array.SliceApproxEqual(lastDict, 0, int64(lastLen), pair.Dict, 0, int64(lastLen), eqopt)) { + deltaStart = int64(lastLen) + } + } + + var data = Payload{msg: MessageDictionaryBatch} + defer data.Release() + + dict := pair.Dict + if deltaStart > 0 { + dict = array.NewSlice(dict, deltaStart, int64(dict.Len())) + defer dict.Release() + } + if err := enc.Encode(&data, pair.ID, deltaStart > 0, dict); err != nil { + return err + } + + if err := pw.WritePayload(data); err != nil { + return err + } + + lastWrittenDicts[pair.ID] = pair.Dict + if lastDict != nil { + lastDict.Release() + } + pair.Dict.Retain() + } + return nil +} + +func (w *Writer) start() error { + w.started = true + + w.mapper.ImportSchema(w.schema) + w.lastWrittenDicts = make(map[int64]arrow.Array) + + // write out schema payloads + ps := payloadFromSchema(w.schema, w.mem, &w.mapper) + defer ps.Release() + + for _, data := range ps { + err := w.pw.WritePayload(data) + if err != nil { + return err + } + } + + return nil +} + +type dictEncoder struct { + *recordEncoder +} + +func (d *dictEncoder) encodeMetadata(p *Payload, isDelta bool, id, nrows int64) error { + p.meta = writeDictionaryMessage(d.mem, id, isDelta, nrows, p.size, d.fields, d.meta, d.codec) + return nil +} + +func (d *dictEncoder) Encode(p *Payload, id int64, isDelta bool, dict arrow.Array) error { + d.start = 0 + defer func() { + d.start = 0 + }() + + schema := arrow.NewSchema([]arrow.Field{{Name: "dictionary", Type: dict.DataType(), Nullable: true}}, nil) + batch := array.NewRecord(schema, []arrow.Array{dict}, int64(dict.Len())) + defer batch.Release() + if err := d.encode(p, batch); err != nil { + return err + } + + return d.encodeMetadata(p, isDelta, id, batch.NumRows()) +} + +type recordEncoder struct { + mem memory.Allocator + + fields []fieldMetadata + meta []bufferMetadata + + depth int64 + start int64 + allow64b bool + codec flatbuf.CompressionType + compressNP int + minSpaceSavings *float64 +} + +func newRecordEncoder(mem memory.Allocator, startOffset, maxDepth int64, allow64b bool, codec flatbuf.CompressionType, compressNP int, minSpaceSavings *float64) *recordEncoder { + return &recordEncoder{ + mem: mem, + start: startOffset, + depth: maxDepth, + allow64b: allow64b, + codec: codec, + compressNP: compressNP, + minSpaceSavings: minSpaceSavings, + } +} + +func (w *recordEncoder) shouldCompress(uncompressed, compressed int) bool { + debug.Assert(uncompressed > 0, "uncompressed size is 0") + if w.minSpaceSavings == nil { + return true + } + + savings := 1.0 - float64(compressed)/float64(uncompressed) + return savings >= *w.minSpaceSavings +} + +func (w *recordEncoder) reset() { + w.start = 0 + w.fields = make([]fieldMetadata, 0) +} + +func (w *recordEncoder) compressBodyBuffers(p *Payload) error { + compress := func(idx int, codec compressor) error { + if p.body[idx] == nil || p.body[idx].Len() == 0 { + return nil + } + + buf := memory.NewResizableBuffer(w.mem) + buf.Reserve(codec.MaxCompressedLen(p.body[idx].Len()) + arrow.Int64SizeBytes) + + binary.LittleEndian.PutUint64(buf.Buf(), uint64(p.body[idx].Len())) + bw := &bufferWriter{buf: buf, pos: arrow.Int64SizeBytes} + codec.Reset(bw) + + n, err := codec.Write(p.body[idx].Bytes()) + if err != nil { + return err + } + if err := codec.Close(); err != nil { + return err + } + + finalLen := bw.pos + compressedLen := bw.pos - arrow.Int64SizeBytes + if 
!w.shouldCompress(n, compressedLen) { + n = copy(buf.Buf()[arrow.Int64SizeBytes:], p.body[idx].Bytes()) + // size of -1 indicates to the reader that the body + // doesn't need to be decompressed + var noprefix int64 = -1 + binary.LittleEndian.PutUint64(buf.Buf(), uint64(noprefix)) + finalLen = n + arrow.Int64SizeBytes + } + bw.buf.Resize(finalLen) + p.body[idx].Release() + p.body[idx] = buf + return nil + } + + if w.compressNP <= 1 { + codec := getCompressor(w.codec) + for idx := range p.body { + if err := compress(idx, codec); err != nil { + return err + } + } + return nil + } + + var ( + wg sync.WaitGroup + ch = make(chan int) + errch = make(chan error) + ctx, cancel = context.WithCancel(context.Background()) + ) + defer cancel() + + for i := 0; i < w.compressNP; i++ { + wg.Add(1) + go func() { + defer wg.Done() + codec := getCompressor(w.codec) + for { + select { + case idx, ok := <-ch: + if !ok { + // we're done, channel is closed! + return + } + + if err := compress(idx, codec); err != nil { + errch <- err + cancel() + return + } + case <-ctx.Done(): + // cancelled, return early + return + } + } + }() + } + + for idx := range p.body { + ch <- idx + } + + close(ch) + wg.Wait() + close(errch) + + return <-errch +} + +func (w *recordEncoder) encode(p *Payload, rec arrow.Record) error { + // perform depth-first traversal of the row-batch + for i, col := range rec.Columns() { + err := w.visit(p, col) + if err != nil { + return fmt.Errorf("arrow/ipc: could not encode column %d (%q): %w", i, rec.ColumnName(i), err) + } + } + + if w.codec != -1 { + if w.minSpaceSavings != nil { + pct := *w.minSpaceSavings + if pct < 0 || pct > 1 { + p.Release() + return fmt.Errorf("%w: minSpaceSavings not in range [0,1]. Provided %.05f", + arrow.ErrInvalid, pct) + } + } + w.compressBodyBuffers(p) + } + + // position for the start of a buffer relative to the passed frame of reference. + // may be 0 or some other position in an address space. + offset := w.start + w.meta = make([]bufferMetadata, len(p.body)) + + // construct the metadata for the record batch header + for i, buf := range p.body { + var ( + size int64 + padding int64 + ) + // the buffer might be null if we are handling zero row lengths. + if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + w.meta[i] = bufferMetadata{ + Offset: offset, + // even though we add padding, we need the Len to be correct + // so that decompressing works properly. 
+ Len: size, + } + offset += size + padding + } + + p.size = offset - w.start + if !bitutil.IsMultipleOf8(p.size) { + panic("not aligned") + } + + return nil +} + +func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { + if w.depth <= 0 { + return errMaxRecursion + } + + if !w.allow64b && arr.Len() > math.MaxInt32 { + return errBigArray + } + + if arr.DataType().ID() == arrow.EXTENSION { + arr := arr.(array.ExtensionArray) + err := w.visit(p, arr.Storage()) + if err != nil { + return fmt.Errorf("failed visiting storage of for array %T: %w", arr, err) + } + return nil + } + + if arr.DataType().ID() == arrow.DICTIONARY { + arr := arr.(*array.Dictionary) + return w.visit(p, arr.Indices()) + } + + // add all common elements + w.fields = append(w.fields, fieldMetadata{ + Len: int64(arr.Len()), + Nulls: int64(arr.NullN()), + Offset: 0, + }) + + if arr.DataType().ID() == arrow.NULL { + return nil + } + + if internal.HasValidityBitmap(arr.DataType().ID(), flatbuf.MetadataVersion(currentMetadataVersion)) { + switch arr.NullN() { + case 0: + // there are no null values, drop the null bitmap + p.body = append(p.body, nil) + default: + data := arr.Data() + var bitmap *memory.Buffer + if data.NullN() == data.Len() { + // every value is null, just use a new zero-initialized bitmap to avoid the expense of copying + bitmap = memory.NewResizableBuffer(w.mem) + minLength := paddedLength(bitutil.BytesForBits(int64(data.Len())), kArrowAlignment) + bitmap.Resize(int(minLength)) + } else { + // otherwise truncate and copy the bits + bitmap = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[0]) + } + p.body = append(p.body, bitmap) + } + } + + switch dtype := arr.DataType().(type) { + case *arrow.NullType: + // ok. NullArrays are completely empty. + + case *arrow.BooleanType: + var ( + data = arr.Data() + bitm *memory.Buffer + ) + + if data.Len() != 0 { + bitm = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[1]) + } + p.body = append(p.body, bitm) + + case arrow.FixedWidthDataType: + data := arr.Data() + values := data.Buffers()[1] + arrLen := int64(arr.Len()) + typeWidth := int64(dtype.BitWidth() / 8) + minLength := paddedLength(arrLen*typeWidth, kArrowAlignment) + + switch { + case needTruncate(int64(data.Offset()), values, minLength): + // non-zero offset: slice the buffer + offset := int64(data.Offset()) * typeWidth + // send padding if available + len := minI64(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len())-offset) + values = memory.NewBufferBytes(values.Bytes()[offset : offset+len]) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, values) + + case *arrow.BinaryType, *arrow.LargeBinaryType, *arrow.StringType, *arrow.LargeStringType: + arr := arr.(array.BinaryLike) + voffsets, err := w.getZeroBasedValueOffsets(arr) + if err != nil { + return fmt.Errorf("could not retrieve zero-based value offsets from %T: %w", arr, err) + } + data := arr.Data() + values := data.Buffers()[2] + + var totalDataBytes int64 + if voffsets != nil { + totalDataBytes = int64(len(arr.ValueBytes())) + } + + switch { + case needTruncate(int64(data.Offset()), values, totalDataBytes): + // slice data buffer to include the range we need now. 
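+			// beg is the first value offset of this (possibly sliced) array;
+			// only the value bytes it actually references are kept.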
+ var ( + beg = arr.ValueOffset64(0) + len = minI64(paddedLength(totalDataBytes, kArrowAlignment), int64(totalDataBytes)) + ) + values = memory.NewBufferBytes(data.Buffers()[2].Bytes()[beg : beg+len]) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, voffsets) + p.body = append(p.body, values) + + case *arrow.StructType: + w.depth-- + arr := arr.(*array.Struct) + for i := 0; i < arr.NumField(); i++ { + err := w.visit(p, arr.Field(i)) + if err != nil { + return fmt.Errorf("could not visit field %d of struct-array: %w", i, err) + } + } + w.depth++ + + case *arrow.SparseUnionType: + offset, length := arr.Data().Offset(), arr.Len() + arr := arr.(*array.SparseUnion) + typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes()) + p.body = append(p.body, typeCodes) + + w.depth-- + for i := 0; i < arr.NumFields(); i++ { + err := w.visit(p, arr.Field(i)) + if err != nil { + return fmt.Errorf("could not visit field %d of sparse union array: %w", i, err) + } + } + w.depth++ + case *arrow.DenseUnionType: + offset, length := arr.Data().Offset(), arr.Len() + arr := arr.(*array.DenseUnion) + typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes()) + p.body = append(p.body, typeCodes) + + w.depth-- + dt := arr.UnionType() + + // union type codes are not necessarily 0-indexed + maxCode := dt.MaxTypeCode() + + // allocate an array of child offsets. Set all to -1 to indicate we + // haven't observed a first occurrence of a particular child yet + offsets := make([]int32, maxCode+1) + lengths := make([]int32, maxCode+1) + offsets[0], lengths[0] = -1, 0 + for i := 1; i < len(offsets); i *= 2 { + copy(offsets[i:], offsets[:i]) + copy(lengths[i:], lengths[:i]) + } + + var valueOffsets *memory.Buffer + if offset != 0 { + valueOffsets = w.rebaseDenseUnionValueOffsets(arr, offsets, lengths) + } else { + valueOffsets = getTruncatedBuffer(int64(offset), int64(length), int32(arrow.Int32SizeBytes), arr.ValueOffsets()) + } + p.body = append(p.body, valueOffsets) + + // visit children and slice accordingly + for i := range dt.Fields() { + child := arr.Field(i) + // for sliced unions it's tricky to know how much to truncate + // the children. For now we'll truncate the children to be + // no longer than the parent union. 
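+			// offsets[code] holds the first value offset observed for each
+			// child and lengths[code] its extent, as computed by
+			// rebaseDenseUnionValueOffsets above.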
+ + if offset != 0 { + code := dt.TypeCodes()[i] + childOffset := offsets[code] + childLen := lengths[code] + + if childOffset > 0 { + child = array.NewSlice(child, int64(childOffset), int64(childOffset+childLen)) + defer child.Release() + } else if childLen < int32(child.Len()) { + child = array.NewSlice(child, 0, int64(childLen)) + defer child.Release() + } + } + if err := w.visit(p, child); err != nil { + return fmt.Errorf("could not visit field %d of dense union array: %w", i, err) + } + } + w.depth++ + case *arrow.MapType, *arrow.ListType, *arrow.LargeListType: + arr := arr.(array.ListLike) + voffsets, err := w.getZeroBasedValueOffsets(arr) + if err != nil { + return fmt.Errorf("could not retrieve zero-based value offsets for array %T: %w", arr, err) + } + p.body = append(p.body, voffsets) + + w.depth-- + var ( + values = arr.ListValues() + mustRelease = false + values_offset int64 + values_end int64 + ) + defer func() { + if mustRelease { + values.Release() + } + }() + + if arr.Len() > 0 && voffsets != nil { + values_offset, _ = arr.ValueOffsets(0) + _, values_end = arr.ValueOffsets(arr.Len() - 1) + } + + if arr.Len() != 0 || values_end < int64(values.Len()) { + // must also slice the values + values = array.NewSlice(values, values_offset, values_end) + mustRelease = true + } + err = w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.FixedSizeListType: + arr := arr.(*array.FixedSizeList) + + w.depth-- + + size := int64(arr.DataType().(*arrow.FixedSizeListType).Len()) + beg := int64(arr.Offset()) * size + end := int64(arr.Offset()+arr.Len()) * size + + values := array.NewSlice(arr.ListValues(), beg, end) + defer values.Release() + + err := w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.RunEndEncodedType: + arr := arr.(*array.RunEndEncoded) + w.depth-- + child := arr.LogicalRunEndsArray(w.mem) + defer child.Release() + if err := w.visit(p, child); err != nil { + return err + } + child = arr.LogicalValuesArray() + defer child.Release() + if err := w.visit(p, child); err != nil { + return err + } + w.depth++ + + default: + panic(fmt.Errorf("arrow/ipc: unknown array %T (dtype=%T)", arr, dtype)) + } + + return nil +} + +func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) (*memory.Buffer, error) { + data := arr.Data() + voffsets := data.Buffers()[1] + offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() + offsetBytesNeeded := offsetTraits.BytesRequired(data.Len() + 1) + + if data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() { + // if we have a non-zero offset, then the value offsets do not start at + // zero. 
we must a) create a new offsets array with shifted offsets and + // b) slice the values array accordingly + // + // or if there are more value offsets than values (the array has been sliced) + // we need to trim off the trailing offsets + shiftedOffsets := memory.NewResizableBuffer(w.mem) + shiftedOffsets.Resize(offsetBytesNeeded) + + switch arr.DataType().Layout().Buffers[1].ByteWidth { + case 8: + dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1] + + startOffset := offsets[0] + for i, o := range offsets { + dest[i] = o - startOffset + } + + default: + debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth") + dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1] + + startOffset := offsets[0] + for i, o := range offsets { + dest[i] = o - startOffset + } + } + + voffsets = shiftedOffsets + } else { + voffsets.Retain() + } + if voffsets == nil || voffsets.Len() == 0 { + return nil, nil + } + + return voffsets, nil +} + +func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offsets, lengths []int32) *memory.Buffer { + // this case sucks. Because the offsets are different for each + // child array, when we have a sliced array, we need to re-base + // the value offsets for each array! ew. + unshiftedOffsets := arr.RawValueOffsets() + codes := arr.RawTypeCodes() + + shiftedOffsetsBuf := memory.NewResizableBuffer(w.mem) + shiftedOffsetsBuf.Resize(arrow.Int32Traits.BytesRequired(arr.Len())) + shiftedOffsets := arrow.Int32Traits.CastFromBytes(shiftedOffsetsBuf.Bytes()) + + // compute shifted offsets by subtracting child offset + for i, c := range codes { + if offsets[c] == -1 { + // offsets are guaranteed to be increasing according to the spec + // so the first offset we find for a child is the initial offset + // and will become the "0" for this child. 
+ offsets[c] = unshiftedOffsets[i] + shiftedOffsets[i] = 0 + } else { + shiftedOffsets[i] = unshiftedOffsets[i] - offsets[c] + } + lengths[c] = maxI32(lengths[c], shiftedOffsets[i]+1) + } + return shiftedOffsetsBuf +} + +func (w *recordEncoder) Encode(p *Payload, rec arrow.Record) error { + if err := w.encode(p, rec); err != nil { + return err + } + return w.encodeMetadata(p, rec.NumRows()) +} + +func (w *recordEncoder) encodeMetadata(p *Payload, nrows int64) error { + p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta, w.codec) + return nil +} + +func newTruncatedBitmap(mem memory.Allocator, offset, length int64, input *memory.Buffer) *memory.Buffer { + if input == nil { + return nil + } + + minLength := paddedLength(bitutil.BytesForBits(length), kArrowAlignment) + switch { + case offset != 0 || minLength < int64(input.Len()): + // with a sliced array / non-zero offset, we must copy the bitmap + buf := memory.NewResizableBuffer(mem) + buf.Resize(int(minLength)) + bitutil.CopyBitmap(input.Bytes(), int(offset), int(length), buf.Bytes(), 0) + return buf + default: + input.Retain() + return input + } +} + +func getTruncatedBuffer(offset, length int64, byteWidth int32, buf *memory.Buffer) *memory.Buffer { + if buf == nil { + return buf + } + + paddedLen := paddedLength(length*int64(byteWidth), kArrowAlignment) + if offset != 0 || paddedLen < int64(buf.Len()) { + return memory.SliceBuffer(buf, int(offset*int64(byteWidth)), int(minI64(paddedLen, int64(buf.Len())))) + } + buf.Retain() + return buf +} + +func needTruncate(offset int64, buf *memory.Buffer, minLength int64) bool { + if buf == nil { + return false + } + return offset != 0 || minLength < int64(buf.Len()) +} + +func minI64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func maxI32(a, b int32) int32 { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/Makefile b/vendor/github.com/apache/arrow/go/v12/arrow/memory/Makefile new file mode 100644 index 00000000..1cc4079c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/Makefile @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +GO_BUILD=go build +GO_GEN=go generate +GO_TEST=go test +GOPATH=$(realpath ../../../..) 
+ +# this converts rotate instructions from "ro[lr] " -> "ro[lr] , 1" for yasm compatibility +PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/' + +C2GOASM=c2goasm -a -f +CC=clang +C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 -fno-asynchronous-unwind-tables \ + -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib +ASM_FLAGS_AVX2=-mavx2 -mfma -mllvm -force-vector-width=32 +ASM_FLAGS_SSE3=-msse3 +ASM_FLAGS_SSE4=-msse4 + +C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \ + -fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib + +GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go') +ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go') + + +INTEL_SOURCES := \ + memory_avx2_amd64.s memory_sse4_amd64.s + +.PHONEY: assembly + +# +# ARROW-15320: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm added the Arm64 support. +# memory_neon_arm64.s were generated by asm2plan9s. +# And manually formatted it as the Arm64 Plan9. +# + +assembly: $(INTEL_SOURCES) + +_lib/memory_avx2.s: _lib/memory.c + $(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@ + +_lib/memory_sse4.s: _lib/memory.c + $(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@ + +_lib/memory_neon.s: _lib/memory.c + $(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@ + +memory_avx2_amd64.s: _lib/memory_avx2.s + $(C2GOASM) -a -f $^ $@ + +memory_sse4_amd64.s: _lib/memory_sse4.s + $(C2GOASM) -a -f $^ $@ diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/allocator.go new file mode 100644 index 00000000..1427190e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/allocator.go @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +const ( + alignment = 64 +) + +type Allocator interface { + Allocate(size int) []byte + Reallocate(size int, b []byte) []byte + Free(b []byte) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go new file mode 100644 index 00000000..ce445d2d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import ( + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +// Buffer is a wrapper type for a buffer of bytes. +type Buffer struct { + refCount int64 + buf []byte + length int + mutable bool + mem Allocator + + parent *Buffer +} + +// NewBufferBytes creates a fixed-size buffer from the specified data. +func NewBufferBytes(data []byte) *Buffer { + return &Buffer{refCount: 0, buf: data, length: len(data)} +} + +// NewResizableBuffer creates a mutable, resizable buffer with an Allocator for managing memory. +func NewResizableBuffer(mem Allocator) *Buffer { + return &Buffer{refCount: 1, mutable: true, mem: mem} +} + +func SliceBuffer(buf *Buffer, offset, length int) *Buffer { + buf.Retain() + return &Buffer{refCount: 1, parent: buf, buf: buf.Bytes()[offset : offset+length], length: length} +} + +// Parent returns either nil or a pointer to the parent buffer if this buffer +// was sliced from another. +func (b *Buffer) Parent() *Buffer { return b.parent } + +// Retain increases the reference count by 1. +func (b *Buffer) Retain() { + if b.mem != nil || b.parent != nil { + atomic.AddInt64(&b.refCount, 1) + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +func (b *Buffer) Release() { + if b.mem != nil || b.parent != nil { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.mem != nil { + b.mem.Free(b.buf) + } else { + b.parent.Release() + b.parent = nil + } + b.buf, b.length = nil, 0 + } + } +} + +// Reset resets the buffer for reuse. +func (b *Buffer) Reset(buf []byte) { + if b.parent != nil { + b.parent.Release() + b.parent = nil + } + b.buf = buf + b.length = len(buf) +} + +// Buf returns the slice of memory allocated by the Buffer, which is adjusted by calling Reserve. +func (b *Buffer) Buf() []byte { return b.buf } + +// Bytes returns a slice of size Len, which is adjusted by calling Resize. +func (b *Buffer) Bytes() []byte { return b.buf[:b.length] } + +// Mutable returns a bool indicating whether the buffer is mutable or not. +func (b *Buffer) Mutable() bool { return b.mutable } + +// Len returns the length of the buffer. +func (b *Buffer) Len() int { return b.length } + +// Cap returns the capacity of the buffer. +func (b *Buffer) Cap() int { return len(b.buf) } + +// Reserve reserves the provided amount of capacity for the buffer. +func (b *Buffer) Reserve(capacity int) { + if capacity > len(b.buf) { + newCap := roundUpToMultipleOf64(capacity) + if len(b.buf) == 0 { + b.buf = b.mem.Allocate(newCap) + } else { + b.buf = b.mem.Reallocate(newCap, b.buf) + } + } +} + +// Resize resizes the buffer to the target size. +func (b *Buffer) Resize(newSize int) { + b.resize(newSize, true) +} + +// ResizeNoShrink resizes the buffer to the target size, but will not +// shrink it. 
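+//
+// For example (an illustrative sketch; Reserve rounds capacity up to a
+// multiple of 64 bytes, so Cap below stays at 64):
+//
+//	buf := memory.NewResizableBuffer(memory.NewGoAllocator())
+//	buf.Resize(10)        // Len()=10, Cap()=64
+//	buf.ResizeNoShrink(5) // Len()=5, capacity is not reduced
+//	buf.Release()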
+func (b *Buffer) ResizeNoShrink(newSize int) { + b.resize(newSize, false) +} + +func (b *Buffer) resize(newSize int, shrink bool) { + if !shrink || newSize > b.length { + b.Reserve(newSize) + } else { + // Buffer is not growing, so shrink to the requested size without + // excess space. + newCap := roundUpToMultipleOf64(newSize) + if len(b.buf) != newCap { + if newSize == 0 { + b.mem.Free(b.buf) + b.buf = nil + } else { + b.buf = b.mem.Reallocate(newCap, b.buf) + } + } + } + b.length = newSize +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go new file mode 100644 index 00000000..1c149c0a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build cgo +// +build ccalloc + +package memory + +import ( + "runtime" + + cga "github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc" +) + +// CgoArrowAllocator is an allocator which exposes the C++ memory pool class +// from the Arrow C++ Library as an allocator for memory buffers to use in Go. +// The build tag 'ccalloc' must be used in order to include it as it requires +// linking against the arrow library. +// +// The primary reason to use this would be as an allocator when dealing with +// exporting data across the cdata interface in order to ensure that the memory +// is allocated safely on the C side so it can be held on the CGO side beyond +// the context of a single function call. If the memory in use isn't allocated +// on the C side, then it is not safe for any pointers to data to be held outside +// of Go beyond the context of a single Cgo function call as it will be invisible +// to the Go garbage collector and could potentially get moved without being updated. +// +// As an alternative, if the arrow C++ libraries aren't available, remember that +// Allocator is an interface, so anything which can allocate data using C/C++ can +// be exposed and then used to meet the Allocator interface if wanting to export data +// across the Cgo interfaces. +type CgoArrowAllocator struct { + pool cga.CGOMemPool +} + +// Allocate does what it says on the tin, allocates a chunk of memory using the underlying +// memory pool, however CGO calls are 'relatively' expensive, which means doing tons of +// small allocations can end up being expensive and potentially slower than just using +// go memory. This means that preallocating via reserve becomes much more important when +// using this allocator. 
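+//
+// A sketch of preallocating through Buffer.Reserve so that many small writes
+// cost a single CGO allocation (alloc here is a hypothetical *CgoArrowAllocator):
+//
+//	buf := memory.NewResizableBuffer(alloc)
+//	defer buf.Release()
+//	buf.Reserve(1 << 20) // one CGO call up front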
+// +// Future development TODO: look into converting this more into a slab style allocator +// which amortizes the cost of smaller allocations by allocating bigger chunks of memory +// and passes them out. +func (alloc *CgoArrowAllocator) Allocate(size int) []byte { + b := cga.CgoPoolAlloc(alloc.pool, size) + return b +} + +func (alloc *CgoArrowAllocator) Free(b []byte) { + cga.CgoPoolFree(alloc.pool, b) +} + +func (alloc *CgoArrowAllocator) Reallocate(size int, b []byte) []byte { + oldSize := len(b) + out := cga.CgoPoolRealloc(alloc.pool, size, b) + + if size > oldSize { + // zero initialize the slice like go would do normally + // C won't zero initialize the memory. + Set(out[oldSize:], 0) + } + return out +} + +// AllocatedBytes returns the current total of bytes that have been allocated by +// the memory pool on the C++ side. +func (alloc *CgoArrowAllocator) AllocatedBytes() int64 { + return cga.CgoPoolCurBytes(alloc.pool) +} + +// AssertSize can be used for testing to ensure and check that there are no memory +// leaks using the allocator. +func (alloc *CgoArrowAllocator) AssertSize(t TestingT, sz int) { + cur := alloc.AllocatedBytes() + if int64(sz) != cur { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", sz, cur) + } +} + +// NewCgoArrowAllocator creates a new allocator which is backed by the C++ Arrow +// memory pool object which could potentially be using jemalloc or mimalloc or +// otherwise as its backend. Memory allocated by this is invisible to the Go +// garbage collector, and as such care should be taken to avoid any memory leaks. +// +// A finalizer is set on the allocator so when the allocator object itself is eventually +// cleaned up by the garbage collector, it will delete the associated C++ memory pool +// object. If the build tag 'cclog' is added, then the memory pool will output a log line +// for every time memory is allocated, freed or reallocated. +func NewCgoArrowAllocator() *CgoArrowAllocator { + alloc := &CgoArrowAllocator{pool: cga.NewCgoArrowAllocator(enableLogging)} + runtime.SetFinalizer(alloc, func(a *CgoArrowAllocator) { cga.ReleaseCGOMemPool(a.pool) }) + return alloc +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_defaults.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_defaults.go new file mode 100644 index 00000000..501431a0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_defaults.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
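+// A quick sketch of the build-tag matrix used by these files ('ccalloc'
+// pulls in the C++-backed allocator; adding 'cclog' flips enableLogging):
+//
+//	go build -tags "ccalloc" ./...        // CgoArrowAllocator, no pool logging
+//	go build -tags "ccalloc cclog" ./...  // CgoArrowAllocator with pool logging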
+ +// +build cgo +// +build ccalloc +// +build !cclog + +package memory + +const enableLogging = false diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_logging.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_logging.go new file mode 100644 index 00000000..01ad6b39 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_logging.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build cgo +// +build ccalloc +// +build cclog + +package memory + +const enableLogging = true diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go new file mode 100644 index 00000000..06be9bda --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package memory + +import ( + "os" + "runtime" + "strconv" + "sync" + "sync/atomic" + "unsafe" +) + +type CheckedAllocator struct { + mem Allocator + sz int64 + + allocs sync.Map +} + +func NewCheckedAllocator(mem Allocator) *CheckedAllocator { + return &CheckedAllocator{mem: mem} +} + +func (a *CheckedAllocator) CurrentAlloc() int { return int(atomic.LoadInt64(&a.sz)) } + +func (a *CheckedAllocator) Allocate(size int) []byte { + atomic.AddInt64(&a.sz, int64(size)) + out := a.mem.Allocate(size) + if size == 0 { + return out + } + + ptr := uintptr(unsafe.Pointer(&out[0])) + if pc, _, l, ok := runtime.Caller(allocFrames); ok { + a.allocs.Store(ptr, &dalloc{pc: pc, line: l, sz: size}) + } + return out +} + +func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte { + atomic.AddInt64(&a.sz, int64(size-len(b))) + + oldptr := uintptr(unsafe.Pointer(&b[0])) + out := a.mem.Reallocate(size, b) + if size == 0 { + return out + } + + newptr := uintptr(unsafe.Pointer(&out[0])) + a.allocs.Delete(oldptr) + if pc, _, l, ok := runtime.Caller(reallocFrames); ok { + a.allocs.Store(newptr, &dalloc{pc: pc, line: l, sz: size}) + } + return out +} + +func (a *CheckedAllocator) Free(b []byte) { + atomic.AddInt64(&a.sz, int64(len(b)*-1)) + defer a.mem.Free(b) + + if len(b) == 0 { + return + } + + ptr := uintptr(unsafe.Pointer(&b[0])) + a.allocs.Delete(ptr) +} + +// typically the allocations are happening in memory.Buffer, not by consumers calling +// allocate/reallocate directly. As a result, we want to skip the caller frames +// of the inner workings of Buffer in order to find the caller that actually triggered +// the allocation via a call to Resize/Reserve/etc. +const ( + defAllocFrames = 4 + defReallocFrames = 3 +) + +// Use the environment variables ARROW_CHECKED_ALLOC_FRAMES and ARROW_CHECKED_REALLOC_FRAMES +// to control how many frames up it checks when storing the caller for allocations/reallocs +// when using this to find memory leaks. 
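+//
+// A sketch of typical test wiring with CheckedAllocator:
+//
+//	mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+//	defer mem.AssertSize(t, 0) // reports any allocation still outstanding
+//	b := mem.Allocate(64)
+//	mem.Free(b)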
+var allocFrames, reallocFrames int = defAllocFrames, defReallocFrames + +func init() { + if val, ok := os.LookupEnv("ARROW_CHECKED_ALLOC_FRAMES"); ok { + if f, err := strconv.Atoi(val); err == nil { + allocFrames = f + } + } + + if val, ok := os.LookupEnv("ARROW_CHECKED_REALLOC_FRAMES"); ok { + if f, err := strconv.Atoi(val); err == nil { + reallocFrames = f + } + } +} + +type dalloc struct { + pc uintptr + line int + sz int +} + +type TestingT interface { + Errorf(format string, args ...interface{}) + Helper() +} + +func (a *CheckedAllocator) AssertSize(t TestingT, sz int) { + a.allocs.Range(func(_, value interface{}) bool { + info := value.(*dalloc) + f := runtime.FuncForPC(info.pc) + t.Errorf("LEAK of %d bytes FROM %s line %d\n", info.sz, f.Name(), info.line) + return true + }) + + if int(atomic.LoadInt64(&a.sz)) != sz { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", sz, a.sz) + } +} + +type CheckedAllocatorScope struct { + alloc *CheckedAllocator + sz int +} + +func NewCheckedAllocatorScope(alloc *CheckedAllocator) *CheckedAllocatorScope { + sz := atomic.LoadInt64(&alloc.sz) + return &CheckedAllocatorScope{alloc: alloc, sz: int(sz)} +} + +func (c *CheckedAllocatorScope) CheckSize(t TestingT) { + sz := int(atomic.LoadInt64(&c.alloc.sz)) + if c.sz != sz { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", c.sz, sz) + } +} + +var ( + _ Allocator = (*CheckedAllocator)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_allocator.go new file mode 100644 index 00000000..f60caccd --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_allocator.go @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !mallocator || !cgo + +package memory + +// DefaultAllocator is a default implementation of Allocator and can be used anywhere +// an Allocator is required. +// +// DefaultAllocator is safe to use from multiple goroutines. +var DefaultAllocator Allocator = NewGoAllocator() diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go new file mode 100644 index 00000000..060bf487 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build mallocator && cgo + +package memory + +import ( + "github.com/apache/arrow/go/v12/arrow/memory/mallocator" +) + +// DefaultAllocator is a default implementation of Allocator and can be used anywhere +// an Allocator is required. +// +// DefaultAllocator is safe to use from multiple goroutines. +var DefaultAllocator Allocator = mallocator.NewMallocator() diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/doc.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/doc.go new file mode 100644 index 00000000..20a28e4e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/doc.go @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package memory provides support for allocating and manipulating memory at a low level. + +The build tag 'mallocator' will switch the default allocator to one backed by libc malloc. This also requires CGO. +*/ +package memory diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go new file mode 100644 index 00000000..1dea4a8d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
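+// A sketch of how the build tags above select the package default:
+//
+//	go build ./...                  // DefaultAllocator = NewGoAllocator()
+//	go build -tags mallocator ./... // DefaultAllocator = mallocator.NewMallocator() (needs CGO)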
+ +package memory + +type GoAllocator struct{} + +func NewGoAllocator() *GoAllocator { return &GoAllocator{} } + +func (a *GoAllocator) Allocate(size int) []byte { + buf := make([]byte, size+alignment) // padding for 64-byte alignment + addr := int(addressOf(buf)) + next := roundUpToMultipleOf64(addr) + if addr != next { + shift := next - addr + return buf[shift : size+shift : size+shift] + } + return buf[:size:size] +} + +func (a *GoAllocator) Reallocate(size int, b []byte) []byte { + if size == len(b) { + return b + } + + newBuf := a.Allocate(size) + copy(newBuf, b) + return newBuf +} + +func (a *GoAllocator) Free(b []byte) {} + +var ( + _ Allocator = (*GoAllocator)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.cc b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.cc new file mode 100644 index 00000000..b2b03737 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.cc @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
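+// A sketch of what GoAllocator's alignment padding above guarantees:
+//
+//	b := memory.NewGoAllocator().Allocate(100)        // len(b)==100, cap(b)==100
+//	aligned := uintptr(unsafe.Pointer(&b[0]))%64 == 0 // always true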
+
+// +build ccalloc
+
+#include "allocator.h"
+#include "arrow/memory_pool.h"
+#include "helpers.h"
+
+struct mem_holder {
+  std::unique_ptr<arrow::MemoryPool> owned_pool;
+  arrow::MemoryPool* pool;
+};
+
+ArrowMemoryPool arrow_create_memory_pool(bool enable_logging) {
+  auto holder = std::make_shared<mem_holder>();
+  if (enable_logging) {
+    holder->owned_pool.reset(new arrow::LoggingMemoryPool(arrow::default_memory_pool()));
+    holder->pool = holder->owned_pool.get();
+  } else {
+    holder->pool = arrow::default_memory_pool();
+  }
+
+  return create_ref<mem_holder>(holder);
+}
+
+void arrow_release_pool(ArrowMemoryPool pool) {
+  release_ref<mem_holder>(pool);
+}
+
+int arrow_pool_allocate(ArrowMemoryPool pool, int64_t size, uint8_t** out) {
+  auto holder = retrieve_instance<mem_holder>(pool);
+  auto status = holder->pool->Allocate(size, out);
+  if (!status.ok()) {
+    return 1;
+  }
+  return 0;
+}
+
+void arrow_pool_free(ArrowMemoryPool pool, uint8_t* buffer, int64_t size) {
+  auto holder = retrieve_instance<mem_holder>(pool);
+  holder->pool->Free(buffer, size);
+}
+
+int arrow_pool_reallocate(ArrowMemoryPool pool, int64_t old_size, int64_t new_size, uint8_t** ptr) {
+  auto holder = retrieve_instance<mem_holder>(pool);
+  auto status = holder->pool->Reallocate(old_size, new_size, ptr);
+  if (!status.ok()) {
+    return 1;
+  }
+  return 0;
+}
+
+int64_t arrow_pool_bytes_allocated(ArrowMemoryPool pool) {
+  auto holder = retrieve_instance<mem_holder>(pool);
+  return holder->pool->bytes_allocated();
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go
new file mode 100644
index 00000000..213e7599
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ccalloc
+
+package cgoalloc
+
+// #cgo !windows pkg-config: arrow
+// #cgo CXXFLAGS: -std=c++14
+// #cgo windows LDFLAGS: -larrow
+// #include "allocator.h"
+import "C"
+import (
+	"reflect"
+	"unsafe"
+)
+
+// CGOMemPool is an alias to the typedef'd uintptr from the allocator.h file
+type CGOMemPool = C.ArrowMemoryPool
+
+// CgoPoolAlloc allocates a block of memory of length 'size' using the memory
+// pool that is passed in.
+func CgoPoolAlloc(pool CGOMemPool, size int) []byte {
+	var ret []byte
+	if size == 0 {
+		return ret
+	}
+
+	var out *C.uint8_t
+	C.arrow_pool_allocate(pool, C.int64_t(size), (**C.uint8_t)(unsafe.Pointer(&out)))
+
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&ret))
+	s.Data = uintptr(unsafe.Pointer(out))
+	s.Len = size
+	s.Cap = size
+
+	return ret
+}
+
+// CgoPoolRealloc calls 'reallocate' on the block of memory passed in which must
+// be a slice that was returned by CgoPoolAlloc or CgoPoolRealloc. 
+func CgoPoolRealloc(pool CGOMemPool, size int, b []byte) []byte { + if len(b) == 0 { + return CgoPoolAlloc(pool, size) + } + + oldSize := C.int64_t(len(b)) + data := (*C.uint8_t)(unsafe.Pointer(&b[0])) + C.arrow_pool_reallocate(pool, oldSize, C.int64_t(size), &data) + + var ret []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) + s.Data = uintptr(unsafe.Pointer(data)) + s.Len = size + s.Cap = size + + return ret +} + +// CgoPoolFree uses the indicated memory pool to free a block of memory. The +// slice passed in *must* be a slice which was returned by CgoPoolAlloc or +// CgoPoolRealloc. +func CgoPoolFree(pool CGOMemPool, b []byte) { + if len(b) == 0 { + return + } + + oldSize := C.int64_t(len(b)) + data := (*C.uint8_t)(unsafe.Pointer(&b[0])) + C.arrow_pool_free(pool, data, oldSize) +} + +// CgoPoolCurBytes returns the current number of bytes allocated by the +// passed in memory pool. +func CgoPoolCurBytes(pool CGOMemPool) int64 { + return int64(C.arrow_pool_bytes_allocated(pool)) +} + +// ReleaseCGOMemPool deletes and frees the memory associated with the +// passed in memory pool on the C++ side. +func ReleaseCGOMemPool(pool CGOMemPool) { + C.arrow_release_pool(pool) +} + +// NewCgoArrowAllocator constructs a new memory pool in C++ and returns +// a reference to it which can then be used with the other functions +// here in order to use it. +// +// Optionally if logging is true, a logging proxy will be wrapped around +// the memory pool so that it will output a line every time memory is +// allocated, reallocated or freed along with the size of the allocation. +func NewCgoArrowAllocator(logging bool) CGOMemPool { + return C.arrow_create_memory_pool(C.bool(logging)) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.h b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.h new file mode 100644 index 00000000..0c874437 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uintptr_t ArrowMemoryPool;
+
+ArrowMemoryPool arrow_create_memory_pool(bool enable_logging);
+int arrow_pool_allocate(ArrowMemoryPool pool, int64_t size, uint8_t** out);
+int arrow_pool_reallocate(ArrowMemoryPool pool, int64_t old_size, int64_t new_size, uint8_t** ptr);
+void arrow_pool_free(ArrowMemoryPool pool, uint8_t* buffer, int64_t size);
+int64_t arrow_pool_bytes_allocated(ArrowMemoryPool pool);
+void arrow_release_pool(ArrowMemoryPool pool);
+
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/helpers.h b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/helpers.h
new file mode 100644
index 00000000..fa5feb6a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/helpers.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+// helper functions to be included by C++ code for interacting with Cgo
+
+// create_ref will construct a shared_ptr on the heap and return a pointer
+// to it. the returned uintptr_t can then be used with retrieve_instance
+// to get back the shared_ptr and object it refers to. This ensures that
+// the object outlives the exported function so that Go can use it.
+template <typename T>
+uintptr_t create_ref(std::shared_ptr<T> t) {
+  std::shared_ptr<T>* retained_ptr = new std::shared_ptr<T>(t);
+  return reinterpret_cast<uintptr_t>(retained_ptr);
+}
+
+// retrieve_instance is used to get back the shared_ptr which was created with
+// create_ref in order to use it in functions where the caller passes back the
+// uintptr_t so that an object can be managed by C++ while a reference to it
+// is passed around in C/CGO
+template <typename T>
+std::shared_ptr<T> retrieve_instance(uintptr_t ref) {
+  std::shared_ptr<T>* retrieved_ptr = reinterpret_cast<std::shared_ptr<T>*>(ref);
+  return *retrieved_ptr;
+}
+
+// release_ref deletes the shared_ptr that was created by create_ref, freeing the
+// object if it was the last shared_ptr which referenced it as per normal smart_ptr
+// rules.
+template <typename T>
+void release_ref(uintptr_t ref) {
+  std::shared_ptr<T>* retrieved_ptr = reinterpret_cast<std::shared_ptr<T>*>(ref);
+  delete retrieved_ptr;
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/doc.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/doc.go
new file mode 100644
index 00000000..a399d85e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/doc.go
@@ -0,0 +1,21 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package mallocator defines an allocator implementation for
+// memory.Allocator which defers to libc malloc. It requires
+// usage of CGO.
+package mallocator
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/mallocator.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/mallocator.go
new file mode 100644
index 00000000..18e0377c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/mallocator.go
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mallocator
+
+// #include <stdlib.h>
+// #include <string.h>
+//
+// void* realloc_and_initialize(void* ptr, size_t old_len, size_t new_len) {
+//   void* new_ptr = realloc(ptr, new_len);
+//   if (new_ptr && new_len > old_len) {
+//     memset(new_ptr + old_len, 0, new_len - old_len);
+//   }
+//   return new_ptr;
+// }
import "C"
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+// Mallocator is an allocator which defers to libc malloc.
+//
+// The primary reason to use this is when exporting data across the C Data
+// Interface. CGO requires that pointers to Go memory are not stored in C
+// memory, which is exactly what the C Data Interface would otherwise
+// require. By allocating with Mallocator up front, we can safely export the
+// buffers in Arrow arrays without copying buffers or violating CGO rules.
+//
+// The build tag 'mallocator' will also make this the default allocator.
+type Mallocator struct {
+	allocatedBytes uint64
+}
+
+func NewMallocator() *Mallocator { return &Mallocator{} }
+
+func (alloc *Mallocator) Allocate(size int) []byte {
+	// Use calloc to zero-initialize memory.
+	// > ...the current implementation may sometimes cause a runtime error if the
+	// > contents of the C memory appear to be a Go pointer. Therefore, avoid
+	// > passing uninitialized C memory to Go code if the Go code is going to store
+	// > pointer values in it. Zero out the memory in C before passing it to Go. 
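+	//
+	// (A sketch of the hazard being avoided: leftover bytes in uninitialized
+	// C memory can look like stored Go pointers to the runtime's cgo checks,
+	// so handing zeroed memory to Go avoids spurious failures.)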
+ if size < 0 { + panic("mallocator: negative size") + } + ptr, err := C.calloc(C.size_t(size), 1) + if err != nil { + panic(err) + } else if ptr == nil { + panic("mallocator: out of memory") + } + atomic.AddUint64(&alloc.allocatedBytes, uint64(size)) + return unsafe.Slice((*byte)(ptr), size) +} + +func (alloc *Mallocator) Free(b []byte) { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + C.free(unsafe.Pointer(sh.Data)) + // Subtract sh.Len via two's complement (since atomic doesn't offer subtract) + atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(sh.Len) - 1)) +} + +func (alloc *Mallocator) Reallocate(size int, b []byte) []byte { + if size < 0 { + panic("mallocator: negative size") + } + sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + ptr, err := C.realloc_and_initialize(unsafe.Pointer(sh.Data), C.size_t(sh.Cap), C.size_t(size)) + if err != nil { + panic(err) + } else if ptr == nil && size != 0 { + panic("mallocator: out of memory") + } + delta := size - len(b) + if delta >= 0 { + atomic.AddUint64(&alloc.allocatedBytes, uint64(delta)) + } else { + atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(-delta) - 1)) + } + return unsafe.Slice((*byte)(ptr), size) +} + +func (alloc *Mallocator) AllocatedBytes() int64 { + return int64(alloc.allocatedBytes) +} + +// Duplicate interface to avoid circular import +type TestingT interface { + Errorf(format string, args ...interface{}) + Helper() +} + +func (alloc *Mallocator) AssertSize(t TestingT, sz int) { + cur := alloc.AllocatedBytes() + if int64(sz) != cur { + t.Helper() + t.Errorf("invalid memory size exp=%d, got=%d", sz, cur) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory.go new file mode 100644 index 00000000..43627f5e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +var ( + memset func(b []byte, c byte) = memory_memset_go +) + +// Set assigns the value c to every element of the slice buf. +func Set(buf []byte, c byte) { + memset(buf, c) +} + +// memory_memset_go reference implementation +func memory_memset_go(buf []byte, c byte) { + for i := 0; i < len(buf); i++ { + buf[i] = c + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_amd64.go new file mode 100644 index 00000000..58356d64 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_amd64.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !noasm + +package memory + +import ( + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAVX2 { + memset = memory_memset_avx2 + } else if cpu.X86.HasSSE42 { + memset = memory_memset_sse4 + } else { + memset = memory_memset_go + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_arm64.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_arm64.go new file mode 100644 index 00000000..3db5d110 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_arm64.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !noasm + +package memory + +import ( + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.ARM64.HasASIMD { + memset = memory_memset_neon + } else { + memset = memory_memset_go + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.go new file mode 100644 index 00000000..2bd851ea --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.go @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
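+// How the pieces above fit together (a sketch; memory.Set is defined in
+// memory.go and the init functions pick an implementation per CPU):
+//
+//	buf := make([]byte, 4096)
+//	memory.Set(buf, 0x00) // dispatches to AVX2, SSE4, NEON, or the Go loop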
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_avx2(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_avx2(buf []byte, c byte) {
+	if len(buf) == 0 {
+		return
+	}
+
+	var (
+		p1 = unsafe.Pointer(&buf[0])
+		p2 = uintptr(len(buf))
+		p3 = uintptr(c)
+	)
+	if len(buf) > 2000 || isMultipleOfPowerOf2(len(buf), 256) {
+		_memset_avx2(p1, p2, p3)
+	} else {
+		_memset_sse4(p1, p2, p3)
+	}
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.s
new file mode 100644
index 00000000..2a77807c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.s
@@ -0,0 +1,85 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_memset_avx2(SB), $0-24
+
+	MOVQ buf+0(FP), DI
+	MOVQ len+8(FP), SI
+	MOVQ c+16(FP), DX
+
+	LONG $0x371c8d4c // lea r11, [rdi + rsi]
+	WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+	JBE LBB0_13
+	LONG $0x80fe8148; WORD $0x0000; BYTE $0x00 // cmp rsi, 128
+	JB LBB0_12
+	WORD $0x8949; BYTE $0xf0 // mov r8, rsi
+	LONG $0x80e08349 // and r8, -128
+	WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+	LONG $0x80e28349 // and r10, -128
+	JE LBB0_12
+	LONG $0xc26ef9c5 // vmovd xmm0, edx
+	LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0
+	LONG $0x804a8d4d // lea r9, [r10 - 128]
+	WORD $0x8944; BYTE $0xc8 // mov eax, r9d
+	WORD $0xe8c1; BYTE $0x07 // shr eax, 7
+	WORD $0xc0ff // inc eax
+	LONG $0x03e08348 // and rax, 3
+	JE LBB0_4
+	WORD $0xf748; BYTE $0xd8 // neg rax
+	WORD $0xc931 // xor ecx, ecx
+
+LBB0_6:
+	LONG $0x047ffec5; BYTE $0x0f // vmovdqu yword [rdi + rcx], ymm0
+	LONG $0x447ffec5; WORD $0x200f // vmovdqu yword [rdi + rcx + 32], ymm0
+	LONG $0x447ffec5; WORD $0x400f // vmovdqu yword [rdi + rcx + 64], ymm0
+	LONG $0x447ffec5; WORD $0x600f // vmovdqu yword [rdi + rcx + 96], ymm0
+	LONG $0x80e98348 // sub rcx, -128
+	WORD $0xff48; BYTE $0xc0 // inc rax
+	JNE LBB0_6
+	JMP LBB0_7
+
+LBB0_4:
+	WORD $0xc931 // xor ecx, ecx
+
+LBB0_7:
+	LONG $0x80f98149; WORD $0x0001; BYTE $0x00 // cmp r9, 384
+	JB LBB0_10
+	WORD $0x894c; BYTE $0xd0 // mov rax, r10
+	WORD $0x2948; BYTE $0xc8 // sub rax, rcx
+	QUAD $0x000001e00f8c8d48 // lea rcx, [rdi + rcx + 480]
+
+LBB0_9:
+	QUAD $0xfffffe20817ffec5 // vmovdqu yword [rcx - 480], ymm0
+	QUAD $0xfffffe40817ffec5 // vmovdqu yword [rcx - 448], ymm0
+	QUAD $0xfffffe60817ffec5 // vmovdqu yword [rcx - 416], ymm0
+	QUAD $0xfffffe80817ffec5 // vmovdqu yword [rcx - 384], ymm0
+	QUAD $0xfffffea0817ffec5 // vmovdqu yword [rcx - 352], ymm0
+	QUAD $0xfffffec0817ffec5 // vmovdqu yword [rcx - 320], ymm0
+	QUAD $0xfffffee0817ffec5 // vmovdqu yword [rcx - 288], ymm0
+	QUAD $0xffffff00817ffec5 // vmovdqu yword [rcx - 256], ymm0
+	QUAD $0xffffff20817ffec5 // vmovdqu yword [rcx - 224], ymm0
+	QUAD $0xffffff40817ffec5 // vmovdqu yword [rcx - 192], ymm0
+	QUAD $0xffffff60817ffec5 // vmovdqu yword [rcx - 160], ymm0
+	LONG $0x417ffec5; BYTE $0x80 // vmovdqu yword [rcx - 128], ymm0
+	LONG $0x417ffec5; BYTE $0xa0 // vmovdqu yword [rcx - 96], ymm0
+	LONG $0x417ffec5; BYTE $0xc0 // vmovdqu yword [rcx - 64], ymm0
+	LONG $0x417ffec5; BYTE $0xe0 // vmovdqu yword [rcx - 32], ymm0
+	LONG $0x017ffec5 // vmovdqu yword [rcx], ymm0
+	LONG $0x00c18148; WORD $0x0002; BYTE $0x00 // add rcx, 512
+	LONG $0xfe000548; WORD $0xffff // add rax, -512
+	JNE LBB0_9
+
+LBB0_10:
+	WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+	JE LBB0_13
+	WORD $0x014c; BYTE $0xc7 // add rdi, r8
+
+LBB0_12:
+	WORD $0x1788 // mov byte [rdi], dl
+	WORD $0xff48; BYTE $0xc7 // inc rdi
+	WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+	JNE LBB0_12
+
+LBB0_13:
+	VZEROUPPER
+	RET
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_js_wasm.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_js_wasm.go
new file mode 100644
index 00000000..9b94d99f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_js_wasm.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build wasm
+
+package memory
+
+func init() {
+	memset = memory_memset_go
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.go
new file mode 100644
index 00000000..6cb0400c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_neon(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_neon(buf []byte, c byte) {
+	if len(buf) == 0 {
+		return
+	}
+	_memset_neon(unsafe.Pointer(&buf[0]), uintptr(len(buf)), uintptr(c))
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.s b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.s
new file mode 100644
index 00000000..18655cc7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.s
@@ -0,0 +1,43 @@
+//+build !noasm !appengine
+
+// ARROW-15320:
+// (C2GOASM doesn't work correctly for Arm64)
+// Partly GENERATED BY asm2plan9s.
+
+// func _memset_neon(buf unsafe.Pointer, len, c uintptr)
+TEXT ·_memset_neon(SB), $0-24
+
+	MOVD buf+0(FP), R0
+	MOVD len+8(FP), R1
+	MOVD c+16(FP), R2
+
+	WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x8b010008 // add x8, x0, x1 + WORD $0xeb00011f // cmp x8, x0 + WORD $0x910003fd // mov x29, sp + BLS LBB0_7 + + WORD $0xf100803f // cmp x1, #32 + BHS LBB0_3 + WORD $0xaa0003e9 // mov x9, x0 + JMP LBB0_6 +LBB0_3: + WORD $0x927be82a // and x10, x1, #0xffffffffffffffe0 + WORD $0x4e010c40 // dup v0.16b, w2 + WORD $0x8b0a0009 // add x9, x0, x10 + WORD $0x9100400b // add x11, x0, #16 + WORD $0xaa0a03ec // mov x12, x10 +LBB0_4: + WORD $0xad3f8160 // stp q0, q0, [x11, #-16] + WORD $0xf100818c // subs x12, x12, #32 + WORD $0x9100816b // add x11, x11, #32 + BNE LBB0_4 + WORD $0xeb01015f // cmp x10, x1 + BEQ LBB0_7 +LBB0_6: + WORD $0x38001522 // strb w2, [x9], #1 + WORD $0xeb09011f // cmp x8, x9 + BNE LBB0_6 +LBB0_7: + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_noasm.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_noasm.go new file mode 100644 index 00000000..bf8846fa --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_noasm.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build noasm + +package memory + +func init() { + memset = memory_memset_go +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.go new file mode 100644 index 00000000..716c0d27 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
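+// Note: as memory_noasm.go above shows, building with the 'noasm' tag pins
+// the portable Go loop on every architecture; a sketch of forcing it while
+// debugging:
+//
+//	go test -tags noasm ./arrow/memory/...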
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_sse4(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_sse4(buf []byte, c byte) {
+	if len(buf) == 0 {
+		return
+	}
+	_memset_sse4(unsafe.Pointer(&buf[0]), uintptr(len(buf)), uintptr(c))
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.s
new file mode 100644
index 00000000..b1906f99
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.s
@@ -0,0 +1,84 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_memset_sse4(SB), $0-24
+
+	MOVQ buf+0(FP), DI
+	MOVQ len+8(FP), SI
+	MOVQ c+16(FP), DX
+
+	LONG $0x371c8d4c // lea r11, [rdi + rsi]
+	WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+	JBE LBB0_13
+	LONG $0x20fe8348 // cmp rsi, 32
+	JB LBB0_12
+	WORD $0x8949; BYTE $0xf0 // mov r8, rsi
+	LONG $0xe0e08349 // and r8, -32
+	WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+	LONG $0xe0e28349 // and r10, -32
+	JE LBB0_12
+	WORD $0xb60f; BYTE $0xc2 // movzx eax, dl
+	LONG $0xc06e0f66 // movd xmm0, eax
+	LONG $0xc9ef0f66 // pxor xmm1, xmm1
+	LONG $0x00380f66; BYTE $0xc1 // pshufb xmm0, xmm1
+	LONG $0xe04a8d4d // lea r9, [r10 - 32]
+	WORD $0x8944; BYTE $0xc9 // mov ecx, r9d
+	WORD $0xe9c1; BYTE $0x05 // shr ecx, 5
+	WORD $0xc1ff // inc ecx
+	LONG $0x07e18348 // and rcx, 7
+	JE LBB0_4
+	WORD $0xf748; BYTE $0xd9 // neg rcx
+	WORD $0xc031 // xor eax, eax
+
+LBB0_6:
+	LONG $0x047f0ff3; BYTE $0x07 // movdqu oword [rdi + rax], xmm0
+	LONG $0x447f0ff3; WORD $0x1007 // movdqu oword [rdi + rax + 16], xmm0
+	LONG $0x20c08348 // add rax, 32
+	WORD $0xff48; BYTE $0xc1 // inc rcx
+	JNE LBB0_6
+	JMP LBB0_7
+
+LBB0_4:
+	WORD $0xc031 // xor eax, eax
+
+LBB0_7:
+	LONG $0xe0f98149; WORD $0x0000; BYTE $0x00 // cmp r9, 224
+	JB LBB0_10
+	WORD $0x894c; BYTE $0xd1 // mov rcx, r10
+	WORD $0x2948; BYTE $0xc1 // sub rcx, rax
+	QUAD $0x000000f007848d48 // lea rax, [rdi + rax + 240]
+
+LBB0_9:
+	QUAD $0xffffff10807f0ff3 // movdqu oword [rax - 240], xmm0
+	QUAD $0xffffff20807f0ff3 // movdqu oword [rax - 224], xmm0
+	QUAD $0xffffff30807f0ff3 // movdqu oword [rax - 208], xmm0
+	QUAD $0xffffff40807f0ff3 // movdqu oword [rax - 192], xmm0
+	QUAD $0xffffff50807f0ff3 // movdqu oword [rax - 176], xmm0
+	QUAD $0xffffff60807f0ff3 // movdqu oword [rax - 160], xmm0
+	QUAD $0xffffff70807f0ff3 // movdqu oword [rax - 144], xmm0
+	LONG $0x407f0ff3; BYTE $0x80 // movdqu oword [rax - 128], xmm0
+	LONG $0x407f0ff3; BYTE $0x90 // movdqu oword [rax - 112], xmm0
+	LONG $0x407f0ff3; BYTE $0xa0 // movdqu oword [rax - 96], xmm0
+	LONG $0x407f0ff3; BYTE $0xb0 // movdqu oword [rax - 80], xmm0
+	LONG $0x407f0ff3; BYTE $0xc0 // movdqu oword [rax - 64], xmm0
+	LONG $0x407f0ff3; BYTE $0xd0 // movdqu oword [rax - 48], xmm0
+	LONG $0x407f0ff3; BYTE $0xe0 // movdqu oword [rax - 32], xmm0
+	LONG $0x407f0ff3; BYTE $0xf0 // movdqu oword [rax - 16], xmm0
+	LONG $0x007f0ff3 // movdqu oword [rax], xmm0
+	LONG $0x01000548; WORD $0x0000 // add rax, 256
+	LONG $0x00c18148; WORD $0xffff; BYTE $0xff // add rcx, -256
+	JNE LBB0_9
+
+LBB0_10:
+	WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+	JE LBB0_13
+	WORD $0x014c; BYTE $0xc7 // add rdi, r8
+
+LBB0_12:
+	WORD $0x1788 // mov byte [rdi], dl
+	WORD $0xff48; BYTE $0xc7 // inc rdi
+	WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+	JNE LBB0_12
+
+LBB0_13:
+	RET
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/util.go 
b/vendor/github.com/apache/arrow/go/v12/arrow/memory/util.go new file mode 100644 index 00000000..3b0d3a5c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/memory/util.go @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memory + +import "unsafe" + +func roundToPowerOf2(v, round int) int { + forceCarry := round - 1 + truncateMask := ^forceCarry + return (v + forceCarry) & truncateMask +} + +func roundUpToMultipleOf64(v int) int { + return roundToPowerOf2(v, 64) +} + +func isMultipleOfPowerOf2(v int, d int) bool { + return (v & (d - 1)) == 0 +} + +func addressOf(b []byte) uintptr { + return uintptr(unsafe.Pointer(&b[0])) +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/numeric.schema.json b/vendor/github.com/apache/arrow/go/v12/arrow/numeric.schema.json new file mode 100644 index 00000000..7fa2800a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/numeric.schema.json @@ -0,0 +1,15 @@ +{ + "title": "templates", + "type": "array", + "items": { + "title": "template", + "type": "object", + "properties": { + "Name": { + "type": "string", + "description": "The name of the template type" + } + }, + "required": ["Name"] + } +} \ No newline at end of file diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata b/vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata new file mode 100644 index 00000000..92cd48ba --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata @@ -0,0 +1,147 @@ +[ + { + "Name": "Int64", + "name": "int64", + "Type": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "BufferBuilder": true + } + }, + { + "Name": "Uint64", + "name": "uint64", + "Type": "uint64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Float64", + "name": "float64", + "Type": "float64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Int32", + "name": "int32", + "Type": "int32", + "Default": "0", + "Size": "4", + "Opt": { + "BufferBuilder": true + } + }, + { + "Name": "Uint32", + "name": "uint32", + "Type": "uint32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Float32", + "name": "float32", + "Type": "float32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Int16", + "name": "int16", + "Type": "int16", + "Default": "0", + "Size": "2" + }, + { + "Name": "Uint16", + "name": "uint16", + "Type": "uint16", + "Default": "0", + "Size": "2" + }, + { + "Name": "Int8", + "name": "int8", + "Type": "int8", + "Default": "0", + "Size": "1", + "Opt": { + "BufferBuilder": true + } + }, + { + "Name": "Uint8", + "name": "uint8", + "Type": "uint8", + "Default": "0", + "Size": "1" + }, + { + "Name": "Timestamp", + "name": "timestamp", + "Type": "Timestamp", + "QualifiedType": "arrow.Timestamp", + "InternalType": "int64", + "Default": "0", + 
"Size": "8", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Time32", + "name": "time32", + "Type": "Time32", + "QualifiedType": "arrow.Time32", + "InternalType": "int32", + "Default": "0", + "Size": "4", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Time64", + "name": "time64", + "Type": "Time64", + "QualifiedType": "arrow.Time64", + "InternalType": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "Parametric": true + } + }, + { + "Name": "Date32", + "name": "date32", + "Type": "Date32", + "QualifiedType": "arrow.Date32", + "InternalType": "int32", + "Default": "0", + "Size": "4" + }, + { + "Name": "Date64", + "name": "date64", + "Type": "Date64", + "QualifiedType": "arrow.Date64", + "InternalType": "int64", + "Default": "0", + "Size": "8" + }, + { + "Name": "Duration", + "name": "duration", + "Type": "Duration", + "QualifiedType": "arrow.Duration", + "InternalType": "int64", + "Default": "0", + "Size": "8", + "Opt": { + "Parametric": true + } + } +] diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/record.go b/vendor/github.com/apache/arrow/go/v12/arrow/record.go new file mode 100644 index 00000000..b93f1015 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/record.go @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import "encoding/json" + +// Record is a collection of equal-length arrays matching a particular Schema. +// Also known as a RecordBatch in the spec and in some implementations. +// +// It is also possible to construct a Table from a collection of Records that +// all have the same schema. +type Record interface { + json.Marshaler + + Release() + Retain() + + Schema() *Schema + + NumRows() int64 + NumCols() int64 + + Columns() []Array + Column(i int) Array + ColumnName(i int) string + SetColumn(i int, col Array) (Record, error) + + // NewSlice constructs a zero-copy slice of the record with the indicated + // indices i and j, corresponding to array[i:j]. + // The returned record must be Release()'d after use. + // + // NewSlice panics if the slice is outside the valid range of the record array. + // NewSlice panics if j < i. + NewSlice(i, j int64) Record +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/schema.go b/vendor/github.com/apache/arrow/go/v12/arrow/schema.go new file mode 100644 index 00000000..87bfe2b4 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/schema.go @@ -0,0 +1,276 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "sort" + "strings" + + "github.com/apache/arrow/go/v12/arrow/endian" +) + +type Metadata struct { + keys []string + values []string +} + +func NewMetadata(keys, values []string) Metadata { + if len(keys) != len(values) { + panic("arrow: len mismatch") + } + + n := len(keys) + if n == 0 { + return Metadata{} + } + + md := Metadata{ + keys: make([]string, n), + values: make([]string, n), + } + copy(md.keys, keys) + copy(md.values, values) + return md +} + +func MetadataFrom(kv map[string]string) Metadata { + md := Metadata{ + keys: make([]string, 0, len(kv)), + values: make([]string, 0, len(kv)), + } + for k := range kv { + md.keys = append(md.keys, k) + } + sort.Strings(md.keys) + for _, k := range md.keys { + md.values = append(md.values, kv[k]) + } + return md +} + +func (md Metadata) Len() int { return len(md.keys) } +func (md Metadata) Keys() []string { return md.keys } +func (md Metadata) Values() []string { return md.values } + +func (md Metadata) String() string { + o := new(strings.Builder) + fmt.Fprintf(o, "[") + for i := range md.keys { + if i > 0 { + fmt.Fprintf(o, ", ") + } + fmt.Fprintf(o, "%q: %q", md.keys[i], md.values[i]) + } + fmt.Fprintf(o, "]") + return o.String() +} + +// FindKey returns the index of the key-value pair with the provided key name, +// or -1 if such a key does not exist. +func (md Metadata) FindKey(k string) int { + for i, v := range md.keys { + if v == k { + return i + } + } + return -1 +} + +// GetValue returns the value associated with the provided key name. +// If the key does not exist, the second return value is false. +func (md Metadata) GetValue(k string) (string, bool) { + i := md.FindKey(k) + if i < 0 { + return "", false + } + return md.values[i], true +} + +func (md Metadata) clone() Metadata { + if len(md.keys) == 0 { + return Metadata{} + } + + o := Metadata{ + keys: make([]string, len(md.keys)), + values: make([]string, len(md.values)), + } + copy(o.keys, md.keys) + copy(o.values, md.values) + + return o +} + +func (md Metadata) sortedIndices() []int { + idxes := make([]int, len(md.keys)) + for i := range idxes { + idxes[i] = i + } + + sort.Slice(idxes, func(i, j int) bool { + return md.keys[idxes[i]] < md.keys[idxes[j]] + }) + return idxes +} + +func (md Metadata) Equal(rhs Metadata) bool { + if md.Len() != rhs.Len() { + return false + } + + idxes := md.sortedIndices() + rhsIdxes := rhs.sortedIndices() + for i := range idxes { + j := idxes[i] + k := rhsIdxes[i] + if md.keys[j] != rhs.keys[k] || md.values[j] != rhs.values[k] { + return false + } + } + return true +} + +// Schema is a sequence of Field values, describing the columns of a table or +// a record batch. +type Schema struct { + fields []Field + index map[string][]int + meta Metadata + endianness endian.Endianness +} + +// NewSchema returns a new Schema value from the slice of fields and metadata. +// +// NewSchema panics if there is a field with an invalid DataType. 
+func NewSchema(fields []Field, metadata *Metadata) *Schema { + return NewSchemaWithEndian(fields, metadata, endian.NativeEndian) +} + +func NewSchemaWithEndian(fields []Field, metadata *Metadata, e endian.Endianness) *Schema { + sc := &Schema{ + fields: make([]Field, 0, len(fields)), + index: make(map[string][]int, len(fields)), + endianness: e, + } + if metadata != nil { + sc.meta = metadata.clone() + } + for i, field := range fields { + if field.Type == nil { + panic("arrow: field with nil DataType") + } + sc.fields = append(sc.fields, field) + sc.index[field.Name] = append(sc.index[field.Name], i) + } + return sc +} + +func (sc *Schema) WithEndianness(e endian.Endianness) *Schema { + return NewSchemaWithEndian(sc.fields, &sc.meta, e) +} + +func (sc *Schema) Endianness() endian.Endianness { return sc.endianness } +func (sc *Schema) IsNativeEndian() bool { return sc.endianness == endian.NativeEndian } +func (sc *Schema) Metadata() Metadata { return sc.meta } +func (sc *Schema) Fields() []Field { return sc.fields } +func (sc *Schema) Field(i int) Field { return sc.fields[i] } + +func (sc *Schema) FieldsByName(n string) ([]Field, bool) { + indices, ok := sc.index[n] + if !ok { + return nil, ok + } + fields := make([]Field, 0, len(indices)) + for _, v := range indices { + fields = append(fields, sc.fields[v]) + } + return fields, ok +} + +// FieldIndices returns the indices of the named field or nil. +func (sc *Schema) FieldIndices(n string) []int { + return sc.index[n] +} + +func (sc *Schema) HasField(n string) bool { return len(sc.FieldIndices(n)) > 0 } +func (sc *Schema) HasMetadata() bool { return len(sc.meta.keys) > 0 } + +// Equal returns whether two schema are equal. +// Equal does not compare the metadata. +func (sc *Schema) Equal(o *Schema) bool { + switch { + case sc == o: + return true + case sc == nil || o == nil: + return false + case len(sc.fields) != len(o.fields): + return false + case sc.endianness != o.endianness: + return false + } + + for i := range sc.fields { + if !sc.fields[i].Equal(o.fields[i]) { + return false + } + } + return true +} + +func (s *Schema) String() string { + o := new(strings.Builder) + fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields())) + for i, f := range s.Fields() { + if i > 0 { + o.WriteString("\n") + } + fmt.Fprintf(o, " - %v", f) + } + if s.endianness != endian.NativeEndian { + fmt.Fprintf(o, "\n endianness: %v", s.endianness) + } + if meta := s.Metadata(); meta.Len() > 0 { + fmt.Fprintf(o, "\n metadata: %v", meta) + } + return o.String() +} + +func (s *Schema) Fingerprint() string { + if s == nil { + return "" + } + + var b strings.Builder + b.WriteString("S{") + for _, f := range s.Fields() { + fieldFingerprint := f.Fingerprint() + if fieldFingerprint == "" { + return "" + } + + b.WriteString(fieldFingerprint) + b.WriteByte(';') + } + if s.endianness == endian.LittleEndian { + b.WriteByte('L') + } else { + b.WriteByte('B') + } + b.WriteByte('}') + return b.String() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/table.go b/vendor/github.com/apache/arrow/go/v12/arrow/table.go new file mode 100644 index 00000000..0d20d955 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/table.go @@ -0,0 +1,189 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
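A short, illustrative sketch of the Metadata and Schema APIs above (field names and metadata keys are arbitrary):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
)

func main() {
	md := arrow.MetadataFrom(map[string]string{"source": "example"})

	schema := arrow.NewSchema([]arrow.Field{
		{Name: "ts", Type: arrow.PrimitiveTypes.Date32},
		{Name: "value", Type: arrow.PrimitiveTypes.Float64},
	}, &md) // NewSchema clones the metadata

	fmt.Println(schema.HasField("value")) // true
	v, ok := schema.Metadata().GetValue("source")
	fmt.Println(v, ok) // example true
}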
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "fmt" + "sync/atomic" + + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +// Table represents a logical sequence of chunked arrays of equal length. It is +// similar to a Record except that the columns are ChunkedArrays instead, +// allowing for a Table to be built up by chunks progressively whereas the columns +// in a single Record are always each a single contiguous array. +type Table interface { + Schema() *Schema + NumRows() int64 + NumCols() int64 + Column(i int) *Column + + Retain() + Release() +} + +// Column is an immutable column data structure consisting of +// a field (type metadata) and a chunked data array. +// +// To get strongly typed data from a Column, you need to iterate the +// chunks and type assert each individual Array. For example: +// +// switch column.DataType().ID { +// case arrow.INT32: +// for _, c := range column.Data().Chunks() { +// arr := c.(*array.Int32) +// // do something with arr +// } +// case arrow.INT64: +// for _, c := range column.Data().Chunks() { +// arr := c.(*array.Int64) +// // do something with arr +// } +// case ... +// } +type Column struct { + field Field + data *Chunked +} + +// NewColumnFromArr is a convenience function to create a column from +// a field and a non-chunked array. +// +// This provides a simple mechanism for bypassing the middle step of +// constructing a Chunked array of one and then releasing it because +// of the ref counting. +func NewColumnFromArr(field Field, arr Array) Column { + if !TypeEqual(field.Type, arr.DataType()) { + panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, field.Type, arr.DataType())) + } + + arr.Retain() + return Column{ + field: field, + data: &Chunked{ + refCount: 1, + chunks: []Array{arr}, + length: arr.Len(), + nulls: arr.NullN(), + dtype: field.Type, + }, + } +} + +// NewColumn returns a column from a field and a chunked data array. +// +// NewColumn panics if the field's data type is inconsistent with the data type +// of the chunked data array. +func NewColumn(field Field, chunks *Chunked) *Column { + col := Column{ + field: field, + data: chunks, + } + col.data.Retain() + + if !TypeEqual(col.data.DataType(), col.field.Type) { + col.data.Release() + panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, col.data.DataType(), col.field.Type)) + } + + return &col +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (col *Column) Retain() { + col.data.Retain() +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
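To make the reference-counting contract concrete, a hedged sketch of NewColumnFromArr, which retains the array it wraps so the caller can keep (and later release) its own reference:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	bld := array.NewFloat64Builder(memory.NewGoAllocator())
	defer bld.Release()
	bld.AppendValues([]float64{1.5, 2.5}, nil)

	arr := bld.NewFloat64Array()
	defer arr.Release() // releases only our reference

	field := arrow.Field{Name: "v", Type: arrow.PrimitiveTypes.Float64}
	col := arrow.NewColumnFromArr(field, arr) // retains arr internally
	defer col.Release()

	fmt.Println(col.Name(), col.Len(), col.NullN()) // v 2 0
}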
+func (col *Column) Release() { + col.data.Release() +} + +func (col *Column) Len() int { return col.data.Len() } +func (col *Column) NullN() int { return col.data.NullN() } +func (col *Column) Data() *Chunked { return col.data } +func (col *Column) Field() Field { return col.field } +func (col *Column) Name() string { return col.field.Name } +func (col *Column) DataType() DataType { return col.field.Type } + +// Chunked manages a collection of primitives arrays as one logical large array. +type Chunked struct { + refCount int64 // refCount must be first in the struct for 64 bit alignment and sync/atomic (https://github.com/golang/go/issues/37262) + + chunks []Array + + length int + nulls int + dtype DataType +} + +// NewChunked returns a new chunked array from the slice of arrays. +// +// NewChunked panics if the chunks do not have the same data type. +func NewChunked(dtype DataType, chunks []Array) *Chunked { + arr := &Chunked{ + chunks: make([]Array, 0, len(chunks)), + refCount: 1, + dtype: dtype, + } + for _, chunk := range chunks { + if chunk == nil { + continue + } + + if !TypeEqual(chunk.DataType(), dtype) { + panic(fmt.Errorf("%w: arrow/array: mismatch data type %s vs %s", ErrInvalid, chunk.DataType().String(), dtype.String())) + } + chunk.Retain() + arr.chunks = append(arr.chunks, chunk) + arr.length += chunk.Len() + arr.nulls += chunk.NullN() + } + return arr +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (a *Chunked) Retain() { + atomic.AddInt64(&a.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (a *Chunked) Release() { + debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases") + + if atomic.AddInt64(&a.refCount, -1) == 0 { + for _, arr := range a.chunks { + arr.Release() + } + a.chunks = nil + a.length = 0 + a.nulls = 0 + } +} + +func (a *Chunked) Len() int { return a.length } +func (a *Chunked) NullN() int { return a.nulls } +func (a *Chunked) DataType() DataType { return a.dtype } +func (a *Chunked) Chunks() []Array { return a.chunks } +func (a *Chunked) Chunk(i int) Array { return a.chunks[i] } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/tools.go b/vendor/github.com/apache/arrow/go/v12/arrow/tools.go new file mode 100644 index 00000000..37b6dde3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/tools.go @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
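Chunked follows the same ownership rules: NewChunked retains each chunk it accepts, and Release drops those references again without touching the caller's. An illustrative sketch:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	bld := array.NewInt64Builder(memory.NewGoAllocator())
	defer bld.Release()

	bld.AppendValues([]int64{1, 2}, nil)
	a1 := bld.NewInt64Array()
	defer a1.Release()

	bld.AppendValues([]int64{3}, nil)
	a2 := bld.NewInt64Array()
	defer a2.Release()

	chunked := arrow.NewChunked(arrow.PrimitiveTypes.Int64, []arrow.Array{a1, a2})
	defer chunked.Release()

	fmt.Println(chunked.Len(), len(chunked.Chunks())) // 3 2
}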
+ +//go:build tools +// +build tools + +package tools + +import ( + _ "golang.org/x/tools/cmd/goimports" + _ "golang.org/x/tools/cmd/stringer" +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_string.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_string.go new file mode 100644 index 00000000..41a40738 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_string.go @@ -0,0 +1,61 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package arrow + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NULL-0] + _ = x[BOOL-1] + _ = x[UINT8-2] + _ = x[INT8-3] + _ = x[UINT16-4] + _ = x[INT16-5] + _ = x[UINT32-6] + _ = x[INT32-7] + _ = x[UINT64-8] + _ = x[INT64-9] + _ = x[FLOAT16-10] + _ = x[FLOAT32-11] + _ = x[FLOAT64-12] + _ = x[STRING-13] + _ = x[BINARY-14] + _ = x[FIXED_SIZE_BINARY-15] + _ = x[DATE32-16] + _ = x[DATE64-17] + _ = x[TIMESTAMP-18] + _ = x[TIME32-19] + _ = x[TIME64-20] + _ = x[INTERVAL_MONTHS-21] + _ = x[INTERVAL_DAY_TIME-22] + _ = x[DECIMAL128-23] + _ = x[DECIMAL256-24] + _ = x[LIST-25] + _ = x[STRUCT-26] + _ = x[SPARSE_UNION-27] + _ = x[DENSE_UNION-28] + _ = x[DICTIONARY-29] + _ = x[MAP-30] + _ = x[EXTENSION-31] + _ = x[FIXED_SIZE_LIST-32] + _ = x[DURATION-33] + _ = x[LARGE_STRING-34] + _ = x[LARGE_BINARY-35] + _ = x[LARGE_LIST-36] + _ = x[INTERVAL_MONTH_DAY_NANO-37] + _ = x[RUN_END_ENCODED-38] +} + +const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODED" + +var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 231, 240, 255, 263, 275, 287, 297, 320, 335} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go new file mode 100644 index 00000000..c4e93f5e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package arrow + +import ( + "github.com/apache/arrow/go/v12/arrow/bitutil" +) + +type booleanTraits struct{} + +var BooleanTraits booleanTraits + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (booleanTraits) BytesRequired(n int) int { return bitutil.CeilByte(n) / 8 } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go new file mode 100644 index 00000000..1fc653a1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/decimal128" + "github.com/apache/arrow/go/v12/arrow/endian" +) + +// Decimal128 traits +var Decimal128Traits decimal128Traits + +const ( + // Decimal128SizeBytes specifies the number of bytes required to store a single decimal128 in memory + Decimal128SizeBytes = int(unsafe.Sizeof(decimal128.Num{})) +) + +type decimal128Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (decimal128Traits) BytesRequired(n int) int { return Decimal128SizeBytes * n } + +// PutValue +func (decimal128Traits) PutValue(b []byte, v decimal128.Num) { + endian.Native.PutUint64(b[:8], uint64(v.LowBits())) + endian.Native.PutUint64(b[8:], uint64(v.HighBits())) +} + +// CastFromBytes reinterprets the slice b to a slice of type decimal128.Num. +// +// NOTE: len(b) must be a multiple of Decimal128SizeBytes. +func (decimal128Traits) CastFromBytes(b []byte) []decimal128.Num { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []decimal128.Num + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Decimal128SizeBytes + s.Cap = h.Cap / Decimal128SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (decimal128Traits) CastToBytes(b []decimal128.Num) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Decimal128SizeBytes + s.Cap = h.Cap * Decimal128SizeBytes + + return res +} + +// Copy copies src to dst. +func (decimal128Traits) Copy(dst, src []decimal128.Num) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go new file mode 100644 index 00000000..0fd3256f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/decimal256" + "github.com/apache/arrow/go/v12/arrow/endian" +) + +// Decimal256 traits +var Decimal256Traits decimal256Traits + +const ( + Decimal256SizeBytes = int(unsafe.Sizeof(decimal256.Num{})) +) + +type decimal256Traits struct{} + +func (decimal256Traits) BytesRequired(n int) int { return Decimal256SizeBytes * n } + +func (decimal256Traits) PutValue(b []byte, v decimal256.Num) { + for i, a := range v.Array() { + start := i * 8 + endian.Native.PutUint64(b[start:], a) + } +} + +// CastFromBytes reinterprets the slice b to a slice of decimal256 +func (decimal256Traits) CastFromBytes(b []byte) []decimal256.Num { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []decimal256.Num + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Decimal256SizeBytes + s.Cap = h.Cap / Decimal256SizeBytes + + return res +} + +func (decimal256Traits) CastToBytes(b []decimal256.Num) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Decimal256SizeBytes + s.Cap = h.Cap * Decimal256SizeBytes + + return res +} + +func (decimal256Traits) Copy(dst, src []decimal256.Num) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go new file mode 100644 index 00000000..1adc6e8d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
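All of the CastFromBytes/CastToBytes pairs in these trait files use the same reflect.SliceHeader technique: they rewrite a fresh slice header so the result aliases the input's backing memory, with no copy. (reflect.SliceHeader has since been deprecated in Go in favor of unsafe.Slice, but this vendored v12 release predates that.) A sketch using the decimal128 traits above:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/decimal128"
)

func main() {
	nums := []decimal128.Num{decimal128.FromI64(7), decimal128.FromI64(8)}

	raw := arrow.Decimal128Traits.CastToBytes(nums)      // shares nums' backing array
	fmt.Println(len(raw) == 2*arrow.Decimal128SizeBytes) // true

	back := arrow.Decimal128Traits.CastFromBytes(raw)
	fmt.Println(back[1].LowBits()) // 8
}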
+ +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/float16" +) + +// Float16 traits +var Float16Traits float16Traits + +const ( + // Float16SizeBytes specifies the number of bytes required to store a single float16 in memory + Float16SizeBytes = int(unsafe.Sizeof(uint16(0))) +) + +type float16Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (float16Traits) BytesRequired(n int) int { return Float16SizeBytes * n } + +// PutValue +func (float16Traits) PutValue(b []byte, v float16.Num) { + endian.Native.PutUint16(b, uint16(v.Uint16())) +} + +// CastFromBytes reinterprets the slice b to a slice of type float16.Num. +// +// NOTE: len(b) must be a multiple of Float16SizeBytes. +func (float16Traits) CastFromBytes(b []byte) []float16.Num { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []float16.Num + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Float16SizeBytes + s.Cap = h.Cap / Float16SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (float16Traits) CastToBytes(b []float16.Num) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Float16SizeBytes + s.Cap = h.Cap * Float16SizeBytes + + return res +} + +// Copy copies src to dst. +func (float16Traits) Copy(dst, src []float16.Num) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go new file mode 100644 index 00000000..6d68bd0c --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v12/arrow/internal/debug" +) + +var ( + MonthIntervalTraits monthTraits + DayTimeIntervalTraits daytimeTraits + MonthDayNanoIntervalTraits monthDayNanoTraits +) + +func init() { + debug.Assert(MonthIntervalSizeBytes == 4, "MonthIntervalSizeBytes should be 4") + debug.Assert(DayTimeIntervalSizeBytes == 8, "DayTimeIntervalSizeBytes should be 8") + debug.Assert(MonthDayNanoIntervalSizeBytes == 16, "MonthDayNanoIntervalSizeBytes should be 16") +} + +// MonthInterval traits + +const ( + // MonthIntervalSizeBytes specifies the number of bytes required to store a single MonthInterval in memory + MonthIntervalSizeBytes = int(unsafe.Sizeof(MonthInterval(0))) +) + +type monthTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (monthTraits) BytesRequired(n int) int { return MonthIntervalSizeBytes * n } + +// PutValue +func (monthTraits) PutValue(b []byte, v MonthInterval) { + endian.Native.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type MonthInterval. +// +// NOTE: len(b) must be a multiple of MonthIntervalSizeBytes. +func (monthTraits) CastFromBytes(b []byte) []MonthInterval { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []MonthInterval + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / MonthIntervalSizeBytes + s.Cap = h.Cap / MonthIntervalSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (monthTraits) CastToBytes(b []MonthInterval) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * MonthIntervalSizeBytes + s.Cap = h.Cap * MonthIntervalSizeBytes + + return res +} + +// Copy copies src to dst. +func (monthTraits) Copy(dst, src []MonthInterval) { copy(dst, src) } + +// DayTimeInterval traits + +const ( + // DayTimeIntervalSizeBytes specifies the number of bytes required to store a single DayTimeInterval in memory + DayTimeIntervalSizeBytes = int(unsafe.Sizeof(DayTimeInterval{})) +) + +type daytimeTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (daytimeTraits) BytesRequired(n int) int { return DayTimeIntervalSizeBytes * n } + +// PutValue +func (daytimeTraits) PutValue(b []byte, v DayTimeInterval) { + endian.Native.PutUint32(b[0:4], uint32(v.Days)) + endian.Native.PutUint32(b[4:8], uint32(v.Milliseconds)) +} + +// CastFromBytes reinterprets the slice b to a slice of type DayTimeInterval. +// +// NOTE: len(b) must be a multiple of DayTimeIntervalSizeBytes. +func (daytimeTraits) CastFromBytes(b []byte) []DayTimeInterval { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []DayTimeInterval + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / DayTimeIntervalSizeBytes + s.Cap = h.Cap / DayTimeIntervalSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (daytimeTraits) CastToBytes(b []DayTimeInterval) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * DayTimeIntervalSizeBytes + s.Cap = h.Cap * DayTimeIntervalSizeBytes + + return res +} + +// Copy copies src to dst. 
+func (daytimeTraits) Copy(dst, src []DayTimeInterval) { copy(dst, src) } + +// MonthDayNanoInterval traits + +const ( + // MonthDayNanoIntervalSizeBytes specifies the number of bytes required to store a single MonthDayNanoInterval in memory + MonthDayNanoIntervalSizeBytes = int(unsafe.Sizeof(MonthDayNanoInterval{})) +) + +type monthDayNanoTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (monthDayNanoTraits) BytesRequired(n int) int { return MonthDayNanoIntervalSizeBytes * n } + +// PutValue +func (monthDayNanoTraits) PutValue(b []byte, v MonthDayNanoInterval) { + endian.Native.PutUint32(b[0:4], uint32(v.Months)) + endian.Native.PutUint32(b[4:8], uint32(v.Days)) + endian.Native.PutUint64(b[8:], uint64(v.Nanoseconds)) +} + +// CastFromBytes reinterprets the slice b to a slice of type MonthDayNanoInterval. +// +// NOTE: len(b) must be a multiple of MonthDayNanoIntervalSizeBytes. +func (monthDayNanoTraits) CastFromBytes(b []byte) []MonthDayNanoInterval { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []MonthDayNanoInterval + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / MonthDayNanoIntervalSizeBytes + s.Cap = h.Cap / MonthDayNanoIntervalSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (monthDayNanoTraits) CastToBytes(b []MonthDayNanoInterval) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * MonthDayNanoIntervalSizeBytes + s.Cap = h.Cap * MonthDayNanoIntervalSizeBytes + + return res +} + +// Copy copies src to dst. +func (monthDayNanoTraits) Copy(dst, src []MonthDayNanoInterval) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go new file mode 100644 index 00000000..725316c7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go @@ -0,0 +1,814 @@ +// Code generated by type_traits_numeric.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
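For orientation, the MonthDayNano PutValue above writes a fixed 16-byte layout (months and days as 4-byte values, nanoseconds as 8 bytes, all native-endian), which CastFromBytes reads back without copying. An illustrative round trip:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
)

func main() {
	buf := make([]byte, arrow.MonthDayNanoIntervalSizeBytes)
	arrow.MonthDayNanoIntervalTraits.PutValue(buf, arrow.MonthDayNanoInterval{
		Months: 1, Days: 2, Nanoseconds: 3,
	})

	v := arrow.MonthDayNanoIntervalTraits.CastFromBytes(buf)[0]
	fmt.Println(v.Months, v.Days, v.Nanoseconds) // 1 2 3
}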
+ +package arrow + +import ( + "math" + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/endian" +) + +var ( + Int64Traits int64Traits + Uint64Traits uint64Traits + Float64Traits float64Traits + Int32Traits int32Traits + Uint32Traits uint32Traits + Float32Traits float32Traits + Int16Traits int16Traits + Uint16Traits uint16Traits + Int8Traits int8Traits + Uint8Traits uint8Traits + TimestampTraits timestampTraits + Time32Traits time32Traits + Time64Traits time64Traits + Date32Traits date32Traits + Date64Traits date64Traits + DurationTraits durationTraits +) + +// Int64 traits + +const ( + // Int64SizeBytes specifies the number of bytes required to store a single int64 in memory + Int64SizeBytes = int(unsafe.Sizeof(int64(0))) +) + +type int64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int64Traits) BytesRequired(n int) int { return Int64SizeBytes * n } + +// PutValue +func (int64Traits) PutValue(b []byte, v int64) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int64. +// +// NOTE: len(b) must be a multiple of Int64SizeBytes. +func (int64Traits) CastFromBytes(b []byte) []int64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int64SizeBytes + s.Cap = h.Cap / Int64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int64Traits) CastToBytes(b []int64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int64SizeBytes + s.Cap = h.Cap * Int64SizeBytes + + return res +} + +// Copy copies src to dst. +func (int64Traits) Copy(dst, src []int64) { copy(dst, src) } + +// Uint64 traits + +const ( + // Uint64SizeBytes specifies the number of bytes required to store a single uint64 in memory + Uint64SizeBytes = int(unsafe.Sizeof(uint64(0))) +) + +type uint64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint64Traits) BytesRequired(n int) int { return Uint64SizeBytes * n } + +// PutValue +func (uint64Traits) PutValue(b []byte, v uint64) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint64. +// +// NOTE: len(b) must be a multiple of Uint64SizeBytes. +func (uint64Traits) CastFromBytes(b []byte) []uint64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint64SizeBytes + s.Cap = h.Cap / Uint64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint64Traits) CastToBytes(b []uint64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint64SizeBytes + s.Cap = h.Cap * Uint64SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint64Traits) Copy(dst, src []uint64) { copy(dst, src) } + +// Float64 traits + +const ( + // Float64SizeBytes specifies the number of bytes required to store a single float64 in memory + Float64SizeBytes = int(unsafe.Sizeof(float64(0))) +) + +type float64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func (float64Traits) BytesRequired(n int) int { return Float64SizeBytes * n } + +// PutValue +func (float64Traits) PutValue(b []byte, v float64) { + endian.Native.PutUint64(b, math.Float64bits(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type float64. +// +// NOTE: len(b) must be a multiple of Float64SizeBytes. +func (float64Traits) CastFromBytes(b []byte) []float64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []float64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Float64SizeBytes + s.Cap = h.Cap / Float64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (float64Traits) CastToBytes(b []float64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Float64SizeBytes + s.Cap = h.Cap * Float64SizeBytes + + return res +} + +// Copy copies src to dst. +func (float64Traits) Copy(dst, src []float64) { copy(dst, src) } + +// Int32 traits + +const ( + // Int32SizeBytes specifies the number of bytes required to store a single int32 in memory + Int32SizeBytes = int(unsafe.Sizeof(int32(0))) +) + +type int32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int32Traits) BytesRequired(n int) int { return Int32SizeBytes * n } + +// PutValue +func (int32Traits) PutValue(b []byte, v int32) { + endian.Native.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int32. +// +// NOTE: len(b) must be a multiple of Int32SizeBytes. +func (int32Traits) CastFromBytes(b []byte) []int32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int32SizeBytes + s.Cap = h.Cap / Int32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int32Traits) CastToBytes(b []int32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int32SizeBytes + s.Cap = h.Cap * Int32SizeBytes + + return res +} + +// Copy copies src to dst. +func (int32Traits) Copy(dst, src []int32) { copy(dst, src) } + +// Uint32 traits + +const ( + // Uint32SizeBytes specifies the number of bytes required to store a single uint32 in memory + Uint32SizeBytes = int(unsafe.Sizeof(uint32(0))) +) + +type uint32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint32Traits) BytesRequired(n int) int { return Uint32SizeBytes * n } + +// PutValue +func (uint32Traits) PutValue(b []byte, v uint32) { + endian.Native.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint32. +// +// NOTE: len(b) must be a multiple of Uint32SizeBytes. +func (uint32Traits) CastFromBytes(b []byte) []uint32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint32SizeBytes + s.Cap = h.Cap / Uint32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. 
+func (uint32Traits) CastToBytes(b []uint32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint32SizeBytes + s.Cap = h.Cap * Uint32SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint32Traits) Copy(dst, src []uint32) { copy(dst, src) } + +// Float32 traits + +const ( + // Float32SizeBytes specifies the number of bytes required to store a single float32 in memory + Float32SizeBytes = int(unsafe.Sizeof(float32(0))) +) + +type float32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (float32Traits) BytesRequired(n int) int { return Float32SizeBytes * n } + +// PutValue +func (float32Traits) PutValue(b []byte, v float32) { + endian.Native.PutUint32(b, math.Float32bits(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type float32. +// +// NOTE: len(b) must be a multiple of Float32SizeBytes. +func (float32Traits) CastFromBytes(b []byte) []float32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []float32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Float32SizeBytes + s.Cap = h.Cap / Float32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (float32Traits) CastToBytes(b []float32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Float32SizeBytes + s.Cap = h.Cap * Float32SizeBytes + + return res +} + +// Copy copies src to dst. +func (float32Traits) Copy(dst, src []float32) { copy(dst, src) } + +// Int16 traits + +const ( + // Int16SizeBytes specifies the number of bytes required to store a single int16 in memory + Int16SizeBytes = int(unsafe.Sizeof(int16(0))) +) + +type int16Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int16Traits) BytesRequired(n int) int { return Int16SizeBytes * n } + +// PutValue +func (int16Traits) PutValue(b []byte, v int16) { + endian.Native.PutUint16(b, uint16(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type int16. +// +// NOTE: len(b) must be a multiple of Int16SizeBytes. +func (int16Traits) CastFromBytes(b []byte) []int16 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int16 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int16SizeBytes + s.Cap = h.Cap / Int16SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int16Traits) CastToBytes(b []int16) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int16SizeBytes + s.Cap = h.Cap * Int16SizeBytes + + return res +} + +// Copy copies src to dst. +func (int16Traits) Copy(dst, src []int16) { copy(dst, src) } + +// Uint16 traits + +const ( + // Uint16SizeBytes specifies the number of bytes required to store a single uint16 in memory + Uint16SizeBytes = int(unsafe.Sizeof(uint16(0))) +) + +type uint16Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func (uint16Traits) BytesRequired(n int) int { return Uint16SizeBytes * n } + +// PutValue +func (uint16Traits) PutValue(b []byte, v uint16) { + endian.Native.PutUint16(b, uint16(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint16. +// +// NOTE: len(b) must be a multiple of Uint16SizeBytes. +func (uint16Traits) CastFromBytes(b []byte) []uint16 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint16 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint16SizeBytes + s.Cap = h.Cap / Uint16SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint16Traits) CastToBytes(b []uint16) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint16SizeBytes + s.Cap = h.Cap * Uint16SizeBytes + + return res +} + +// Copy copies src to dst. +func (uint16Traits) Copy(dst, src []uint16) { copy(dst, src) } + +// Int8 traits + +const ( + // Int8SizeBytes specifies the number of bytes required to store a single int8 in memory + Int8SizeBytes = int(unsafe.Sizeof(int8(0))) +) + +type int8Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (int8Traits) BytesRequired(n int) int { return Int8SizeBytes * n } + +// PutValue +func (int8Traits) PutValue(b []byte, v int8) { + b[0] = byte(v) +} + +// CastFromBytes reinterprets the slice b to a slice of type int8. +// +// NOTE: len(b) must be a multiple of Int8SizeBytes. +func (int8Traits) CastFromBytes(b []byte) []int8 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []int8 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Int8SizeBytes + s.Cap = h.Cap / Int8SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (int8Traits) CastToBytes(b []int8) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Int8SizeBytes + s.Cap = h.Cap * Int8SizeBytes + + return res +} + +// Copy copies src to dst. +func (int8Traits) Copy(dst, src []int8) { copy(dst, src) } + +// Uint8 traits + +const ( + // Uint8SizeBytes specifies the number of bytes required to store a single uint8 in memory + Uint8SizeBytes = int(unsafe.Sizeof(uint8(0))) +) + +type uint8Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (uint8Traits) BytesRequired(n int) int { return Uint8SizeBytes * n } + +// PutValue +func (uint8Traits) PutValue(b []byte, v uint8) { + b[0] = byte(v) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint8. +// +// NOTE: len(b) must be a multiple of Uint8SizeBytes. +func (uint8Traits) CastFromBytes(b []byte) []uint8 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []uint8 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Uint8SizeBytes + s.Cap = h.Cap / Uint8SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (uint8Traits) CastToBytes(b []uint8) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Uint8SizeBytes + s.Cap = h.Cap * Uint8SizeBytes + + return res +} + +// Copy copies src to dst. 
+func (uint8Traits) Copy(dst, src []uint8) { copy(dst, src) } + +// Timestamp traits + +const ( + // TimestampSizeBytes specifies the number of bytes required to store a single Timestamp in memory + TimestampSizeBytes = int(unsafe.Sizeof(Timestamp(0))) +) + +type timestampTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (timestampTraits) BytesRequired(n int) int { return TimestampSizeBytes * n } + +// PutValue +func (timestampTraits) PutValue(b []byte, v Timestamp) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Timestamp. +// +// NOTE: len(b) must be a multiple of TimestampSizeBytes. +func (timestampTraits) CastFromBytes(b []byte) []Timestamp { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Timestamp + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / TimestampSizeBytes + s.Cap = h.Cap / TimestampSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (timestampTraits) CastToBytes(b []Timestamp) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * TimestampSizeBytes + s.Cap = h.Cap * TimestampSizeBytes + + return res +} + +// Copy copies src to dst. +func (timestampTraits) Copy(dst, src []Timestamp) { copy(dst, src) } + +// Time32 traits + +const ( + // Time32SizeBytes specifies the number of bytes required to store a single Time32 in memory + Time32SizeBytes = int(unsafe.Sizeof(Time32(0))) +) + +type time32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (time32Traits) BytesRequired(n int) int { return Time32SizeBytes * n } + +// PutValue +func (time32Traits) PutValue(b []byte, v Time32) { + endian.Native.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Time32. +// +// NOTE: len(b) must be a multiple of Time32SizeBytes. +func (time32Traits) CastFromBytes(b []byte) []Time32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Time32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Time32SizeBytes + s.Cap = h.Cap / Time32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (time32Traits) CastToBytes(b []Time32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Time32SizeBytes + s.Cap = h.Cap * Time32SizeBytes + + return res +} + +// Copy copies src to dst. +func (time32Traits) Copy(dst, src []Time32) { copy(dst, src) } + +// Time64 traits + +const ( + // Time64SizeBytes specifies the number of bytes required to store a single Time64 in memory + Time64SizeBytes = int(unsafe.Sizeof(Time64(0))) +) + +type time64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (time64Traits) BytesRequired(n int) int { return Time64SizeBytes * n } + +// PutValue +func (time64Traits) PutValue(b []byte, v Time64) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Time64. +// +// NOTE: len(b) must be a multiple of Time64SizeBytes. 
+func (time64Traits) CastFromBytes(b []byte) []Time64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Time64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Time64SizeBytes + s.Cap = h.Cap / Time64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (time64Traits) CastToBytes(b []Time64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Time64SizeBytes + s.Cap = h.Cap * Time64SizeBytes + + return res +} + +// Copy copies src to dst. +func (time64Traits) Copy(dst, src []Time64) { copy(dst, src) } + +// Date32 traits + +const ( + // Date32SizeBytes specifies the number of bytes required to store a single Date32 in memory + Date32SizeBytes = int(unsafe.Sizeof(Date32(0))) +) + +type date32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (date32Traits) BytesRequired(n int) int { return Date32SizeBytes * n } + +// PutValue +func (date32Traits) PutValue(b []byte, v Date32) { + endian.Native.PutUint32(b, uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Date32. +// +// NOTE: len(b) must be a multiple of Date32SizeBytes. +func (date32Traits) CastFromBytes(b []byte) []Date32 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Date32 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Date32SizeBytes + s.Cap = h.Cap / Date32SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (date32Traits) CastToBytes(b []Date32) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Date32SizeBytes + s.Cap = h.Cap * Date32SizeBytes + + return res +} + +// Copy copies src to dst. +func (date32Traits) Copy(dst, src []Date32) { copy(dst, src) } + +// Date64 traits + +const ( + // Date64SizeBytes specifies the number of bytes required to store a single Date64 in memory + Date64SizeBytes = int(unsafe.Sizeof(Date64(0))) +) + +type date64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (date64Traits) BytesRequired(n int) int { return Date64SizeBytes * n } + +// PutValue +func (date64Traits) PutValue(b []byte, v Date64) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Date64. +// +// NOTE: len(b) must be a multiple of Date64SizeBytes. +func (date64Traits) CastFromBytes(b []byte) []Date64 { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Date64 + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / Date64SizeBytes + s.Cap = h.Cap / Date64SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (date64Traits) CastToBytes(b []Date64) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * Date64SizeBytes + s.Cap = h.Cap * Date64SizeBytes + + return res +} + +// Copy copies src to dst. 
+func (date64Traits) Copy(dst, src []Date64) { copy(dst, src) } + +// Duration traits + +const ( + // DurationSizeBytes specifies the number of bytes required to store a single Duration in memory + DurationSizeBytes = int(unsafe.Sizeof(Duration(0))) +) + +type durationTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (durationTraits) BytesRequired(n int) int { return DurationSizeBytes * n } + +// PutValue +func (durationTraits) PutValue(b []byte, v Duration) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Duration. +// +// NOTE: len(b) must be a multiple of DurationSizeBytes. +func (durationTraits) CastFromBytes(b []byte) []Duration { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []Duration + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len / DurationSizeBytes + s.Cap = h.Cap / DurationSizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (durationTraits) CastToBytes(b []Duration) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len * DurationSizeBytes + s.Cap = h.Cap * DurationSizeBytes + + return res +} + +// Copy copies src to dst. +func (durationTraits) Copy(dst, src []Duration) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl new file mode 100644 index 00000000..8b18a924 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "math" + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/endian" +) + +var ( +{{range .In}} + {{.Name}}Traits {{.name}}Traits +{{- end}} +) + +{{range .In}} +// {{.Name}} traits + +const ( + // {{.Name}}SizeBytes specifies the number of bytes required to store a single {{.Type}} in memory + {{.Name}}SizeBytes = int(unsafe.Sizeof({{.Type}}({{.Default}}))) +) + +type {{.name}}Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. 
+func ({{.name}}Traits) BytesRequired(n int) int { return {{.Name}}SizeBytes * n } + +// PutValue +func ({{.name}}Traits) PutValue(b []byte, v {{.Type}}) { +{{- if eq .Type "float32" -}} + endian.Native.PutUint32(b, math.Float32bits(v)) +{{- else if eq .Type "float64" -}} + endian.Native.PutUint64(b, math.Float64bits(v)) +{{- else if eq .Size "1" -}} + b[0] = byte(v) +{{- else if eq .Size "2" -}} + endian.Native.PutUint16(b, uint16(v)) +{{- else if eq .Size "4" -}} + endian.Native.PutUint32(b, uint32(v)) +{{- else if eq .Size "8" -}} + endian.Native.PutUint64(b, uint64(v)) +{{- else -}} + panic("invalid type {{.Type}}") +{{end}} +} + +// CastFromBytes reinterprets the slice b to a slice of type {{.Type}}. +// +// NOTE: len(b) must be a multiple of {{.Name}}SizeBytes. +func ({{.name}}Traits) CastFromBytes(b []byte) []{{.Type}} { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []{{.Type}} + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len/{{.Name}}SizeBytes + s.Cap = h.Cap/{{.Name}}SizeBytes + + return res +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func ({{.name}}Traits) CastToBytes(b []{{.Type}}) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + var res []byte + s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) + s.Data = h.Data + s.Len = h.Len*{{.Name}}SizeBytes + s.Cap = h.Cap*{{.Name}}SizeBytes + + return res +} + +// Copy copies src to dst. +func ({{.name}}Traits) Copy(dst, src []{{.Type}}) { copy(dst, src) } +{{end}} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl new file mode 100644 index 00000000..8bb1897f --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
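The test template that follows exercises exactly this PutValue/CastFromBytes round trip; as a concrete, self-contained instance using the generated Int64 traits:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
)

func main() {
	const n = 4
	buf := make([]byte, arrow.Int64Traits.BytesRequired(n))
	for i := 0; i < n; i++ {
		arrow.Int64Traits.PutValue(buf[i*arrow.Int64SizeBytes:], int64(i))
	}

	vals := arrow.Int64Traits.CastFromBytes(buf) // zero-copy reinterpretation
	fmt.Println(vals)                            // [0 1 2 3]
}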
+ +package arrow_test + +import ( + "reflect" + "testing" + + "github.com/apache/arrow/go/v12/arrow" +) + +{{- range .In}} + +func Test{{.Name}}Traits(t *testing.T) { + const N = 10 + b1 := arrow.{{.Name}}Traits.CastToBytes([]{{or .QualifiedType .Type}}{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + }) + + b2 := make([]byte, arrow.{{.Name}}Traits.BytesRequired(N)) + for i := 0; i < N; i++ { + beg := i * arrow.{{.Name}}SizeBytes + end := (i + 1) * arrow.{{.Name}}SizeBytes + arrow.{{.Name}}Traits.PutValue(b2[beg:end], {{or .QualifiedType .Type}}(i)) + } + + if !reflect.DeepEqual(b1, b2) { + v1 := arrow.{{.Name}}Traits.CastFromBytes(b1) + v2 := arrow.{{.Name}}Traits.CastFromBytes(b2) + t.Fatalf("invalid values:\nb1=%v\nb2=%v\nv1=%v\nv2=%v\n", b1, b2, v1, v2) + } + + v1 := arrow.{{.Name}}Traits.CastFromBytes(b1) + for i, v := range v1 { + if got, want := v, {{or .QualifiedType .Type}}(i); got != want { + t.Fatalf("invalid value[%d]. got=%v, want=%v", i, got, want) + } + } + + v2 := make([]{{or .QualifiedType .Type}}, N) + arrow.{{.Name}}Traits.Copy(v2, v1) + + if !reflect.DeepEqual(v1, v2) { + t.Fatalf("invalid values:\nv1=%v\nv2=%v\n", v1, v2) + } +} +{{end}} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/unionmode_string.go b/vendor/github.com/apache/arrow/go/v12/arrow/unionmode_string.go new file mode 100644 index 00000000..394d4f66 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/arrow/unionmode_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=UnionMode -linecomment"; DO NOT EDIT. + +package arrow + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SparseMode-2] + _ = x[DenseMode-3] +} + +const _UnionMode_name = "SPARSEDENSE" + +var _UnionMode_index = [...]uint8{0, 6, 11} + +func (i UnionMode) String() string { + i -= 2 + if i < 0 || i >= UnionMode(len(_UnionMode_index)-1) { + return "UnionMode(" + strconv.FormatInt(int64(i+2), 10) + ")" + } + return _UnionMode_name[_UnionMode_index[i]:_UnionMode_index[i+1]] +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go new file mode 100644 index 00000000..8f724ebb --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go @@ -0,0 +1,452 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bitutils + +import ( + "math" + "math/bits" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/internal/utils" +) + +func loadWord(byt []byte) uint64 { + return utils.ToLEUint64(*(*uint64)(unsafe.Pointer(&byt[0]))) +} + +func shiftWord(current, next uint64, shift int64) uint64 { + if shift == 0 { + return current + } + return (current >> shift) | (next << (64 - shift)) +} + +// BitBlockCount is returned by the various bit block counter utilities +// in order to return a length of bits and the population count of that +// slice of bits. +type BitBlockCount struct { + Len int16 + Popcnt int16 +} + +// NoneSet returns true if ALL the bits were 0 in this set, ie: Popcnt == 0 +func (b BitBlockCount) NoneSet() bool { + return b.Popcnt == 0 +} + +// AllSet returns true if ALL the bits were 1 in this set, ie: Popcnt == Len +func (b BitBlockCount) AllSet() bool { + return b.Len == b.Popcnt +} + +// BitBlockCounter is a utility for grabbing chunks of a bitmap at a time and efficiently +// counting the number of bits which are 1. +type BitBlockCounter struct { + bitmap []byte + bitsRemaining int64 + bitOffset int8 +} + +const ( + wordBits int64 = 64 + fourWordsBits int64 = wordBits * 4 +) + +// NewBitBlockCounter returns a BitBlockCounter for the passed bitmap starting at startOffset +// of length nbits. +func NewBitBlockCounter(bitmap []byte, startOffset, nbits int64) *BitBlockCounter { + return &BitBlockCounter{ + bitmap: bitmap[startOffset/8:], + bitsRemaining: nbits, + bitOffset: int8(startOffset % 8), + } +} + +// getBlockSlow is for returning a block of the requested size when there aren't +// enough bits remaining to do a full word computation. +func (b *BitBlockCounter) getBlockSlow(blockSize int64) BitBlockCount { + runlen := int16(utils.Min(b.bitsRemaining, blockSize)) + popcnt := int16(bitutil.CountSetBits(b.bitmap, int(b.bitOffset), int(runlen))) + b.bitsRemaining -= int64(runlen) + b.bitmap = b.bitmap[runlen/8:] + return BitBlockCount{runlen, popcnt} +} + +// NextFourWords returns the next run of available bits, usually 256. The +// returned pair contains the size of run and the number of true values. +// The last block will have a length less than 256 if the bitmap length +// is not a multiple of 256, and will return 0-length blocks in subsequent +// invocations. +func (b *BitBlockCounter) NextFourWords() BitBlockCount { + if b.bitsRemaining == 0 { + return BitBlockCount{0, 0} + } + + totalPopcnt := 0 + if b.bitOffset == 0 { + // if we're aligned at 0 bitoffset, then we can easily just jump from + // word to word nice and easy. + if b.bitsRemaining < fourWordsBits { + return b.getBlockSlow(fourWordsBits) + } + totalPopcnt += bits.OnesCount64(loadWord(b.bitmap)) + totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[8:])) + totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[16:])) + totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[24:])) + } else { + // When the offset is > 0, we need there to be a word beyond the last + // aligned word in the bitmap for the bit shifting logic. 
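+ // Each 64-bit window below is assembled by shiftWord, which ORs the
+ // remaining high bits of the current word with the low bits of the next
+ // word, so every popcount still covers 64 consecutive bitmap bits.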
+ if b.bitsRemaining < 5*fourWordsBits-int64(b.bitOffset) { + return b.getBlockSlow(fourWordsBits) + } + + current := loadWord(b.bitmap) + next := loadWord(b.bitmap[8:]) + totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset))) + + current = next + next = loadWord(b.bitmap[16:]) + totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset))) + + current = next + next = loadWord(b.bitmap[24:]) + totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset))) + + current = next + next = loadWord(b.bitmap[32:]) + totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset))) + } + b.bitmap = b.bitmap[bitutil.BytesForBits(fourWordsBits):] + b.bitsRemaining -= fourWordsBits + return BitBlockCount{256, int16(totalPopcnt)} +} + +// NextWord returns the next run of available bits, usually 64. The returned +// pair contains the size of run and the number of true values. The last +// block will have a length less than 64 if the bitmap length is not a +// multiple of 64, and will return 0-length blocks in subsequent +// invocations. +func (b *BitBlockCounter) NextWord() BitBlockCount { + if b.bitsRemaining == 0 { + return BitBlockCount{0, 0} + } + popcnt := 0 + if b.bitOffset == 0 { + if b.bitsRemaining < wordBits { + return b.getBlockSlow(wordBits) + } + popcnt = bits.OnesCount64(loadWord(b.bitmap)) + } else { + // When the offset is > 0, we need there to be a word beyond the last + // aligned word in the bitmap for the bit shifting logic. + if b.bitsRemaining < (2*wordBits - int64(b.bitOffset)) { + return b.getBlockSlow(wordBits) + } + popcnt = bits.OnesCount64(shiftWord(loadWord(b.bitmap), loadWord(b.bitmap[8:]), int64(b.bitOffset))) + } + b.bitmap = b.bitmap[wordBits/8:] + b.bitsRemaining -= wordBits + return BitBlockCount{64, int16(popcnt)} +} + +// OptionalBitBlockCounter is a useful counter to iterate through a possibly +// non-existent validity bitmap to allow us to write one code path for both +// the with-nulls and no-nulls cases without giving up a lot of performance. +type OptionalBitBlockCounter struct { + hasBitmap bool + pos int64 + len int64 + counter *BitBlockCounter +} + +// NewOptionalBitBlockCounter constructs and returns a new bit block counter that +// can properly handle the case when a bitmap is null, if it is guaranteed that the +// the bitmap is not nil, then prefer NewBitBlockCounter here. +func NewOptionalBitBlockCounter(bitmap []byte, offset, length int64) *OptionalBitBlockCounter { + var counter *BitBlockCounter + if bitmap != nil { + counter = NewBitBlockCounter(bitmap, offset, length) + } + return &OptionalBitBlockCounter{ + hasBitmap: bitmap != nil, + pos: 0, + len: length, + counter: counter, + } +} + +// NextBlock returns block count for next word when the bitmap is available otherwise +// return a block with length up to INT16_MAX when there is no validity +// bitmap (so all the referenced values are not null). 
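+//
+// A typical consuming loop looks like this sketch (the accumulator name is
+// hypothetical); a zero-length block signals exhaustion:
+//
+//   for block := obc.NextBlock(); block.Len > 0; block = obc.NextBlock() {
+//     nulls += int64(block.Len - block.Popcnt)
+//   }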
+func (obc *OptionalBitBlockCounter) NextBlock() BitBlockCount { + const maxBlockSize = math.MaxInt16 + if obc.hasBitmap { + block := obc.counter.NextWord() + obc.pos += int64(block.Len) + return block + } + + blockSize := int16(utils.Min(maxBlockSize, obc.len-obc.pos)) + obc.pos += int64(blockSize) + // all values are non-null + return BitBlockCount{blockSize, blockSize} +} + +// NextWord is like NextBlock, but returns a word-sized block even when there is no +// validity bitmap +func (obc *OptionalBitBlockCounter) NextWord() BitBlockCount { + const wordsize = 64 + if obc.hasBitmap { + block := obc.counter.NextWord() + obc.pos += int64(block.Len) + return block + } + blockSize := int16(utils.Min(wordsize, obc.len-obc.pos)) + obc.pos += int64(blockSize) + // all values are non-null + return BitBlockCount{blockSize, blockSize} +} + +// VisitBitBlocks is a utility for easily iterating through the blocks of bits in a bitmap, +// calling the appropriate visitValid/visitInvalid function as we iterate through the bits. +// visitValid is called with the bitoffset of the valid bit. Don't use this inside a tight +// loop when performance is needed and instead prefer manually constructing these loops +// in that scenario. +func VisitBitBlocks(bitmap []byte, offset, length int64, visitValid func(pos int64), visitInvalid func()) { + counter := NewOptionalBitBlockCounter(bitmap, offset, length) + pos := int64(0) + for pos < length { + block := counter.NextBlock() + if block.AllSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + visitValid(pos) + } + } else if block.NoneSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + visitInvalid() + } + } else { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + if bitutil.BitIsSet(bitmap, int(offset+pos)) { + visitValid(pos) + } else { + visitInvalid() + } + } + } + } +} + +// VisitBitBlocks is a utility for easily iterating through the blocks of bits in a bitmap, +// calling the appropriate visitValid/visitInvalid function as we iterate through the bits. +// visitValid is called with the bitoffset of the valid bit. Don't use this inside a tight +// loop when performance is needed and instead prefer manually constructing these loops +// in that scenario. 
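+//
+// Unlike VisitBitBlocks, the callbacks here return an error and iteration
+// short-circuits on the first non-nil error.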
+func VisitBitBlocksShort(bitmap []byte, offset, length int64, visitValid func(pos int64) error, visitInvalid func() error) error { + counter := NewOptionalBitBlockCounter(bitmap, offset, length) + pos := int64(0) + for pos < length { + block := counter.NextBlock() + if block.AllSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + if err := visitValid(pos); err != nil { + return err + } + } + } else if block.NoneSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + if err := visitInvalid(); err != nil { + return err + } + } + } else { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + if bitutil.BitIsSet(bitmap, int(offset+pos)) { + if err := visitValid(pos); err != nil { + return err + } + } else { + if err := visitInvalid(); err != nil { + return err + } + } + } + } + } + return nil +} + +func VisitTwoBitBlocks(leftBitmap, rightBitmap []byte, leftOffset, rightOffset int64, len int64, visitValid func(pos int64), visitNull func()) { + if leftBitmap == nil || rightBitmap == nil { + // at most one is present + if leftBitmap == nil { + VisitBitBlocks(rightBitmap, rightOffset, len, visitValid, visitNull) + } else { + VisitBitBlocks(leftBitmap, leftOffset, len, visitValid, visitNull) + } + return + } + + bitCounter := NewBinaryBitBlockCounter(leftBitmap, rightBitmap, leftOffset, rightOffset, len) + var pos int64 + for pos < len { + block := bitCounter.NextAndWord() + if block.AllSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + visitValid(pos) + } + } else if block.NoneSet() { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + visitNull() + } + } else { + for i := 0; i < int(block.Len); i, pos = i+1, pos+1 { + if bitutil.BitIsSet(leftBitmap, int(leftOffset+pos)) && bitutil.BitIsSet(rightBitmap, int(rightOffset+pos)) { + visitValid(pos) + } else { + visitNull() + } + } + } + } +} + +type bitOp struct { + bit func(bool, bool) bool + word func(uint64, uint64) uint64 +} + +var ( + bitBlockAnd = bitOp{ + bit: func(a, b bool) bool { return a && b }, + word: func(a, b uint64) uint64 { return a & b }, + } + bitBlockAndNot = bitOp{ + bit: func(a, b bool) bool { return a && !b }, + word: func(a, b uint64) uint64 { return a &^ b }, + } + bitBlockOr = bitOp{ + bit: func(a, b bool) bool { return a || b }, + word: func(a, b uint64) uint64 { return a | b }, + } + bitBlockOrNot = bitOp{ + bit: func(a, b bool) bool { return a || !b }, + word: func(a, b uint64) uint64 { return a | ^b }, + } +) + +// BinaryBitBlockCounter computes popcounts on the result of bitwise +// operations between two bitmaps, 64 bits at a time. A 64-bit word +// is loaded from each bitmap, then the popcount is computed on +// e.g. the bitwise-and of the two words +type BinaryBitBlockCounter struct { + left []byte + right []byte + bitsRemaining int64 + leftOffset, rightOffset int64 + + bitsRequiredForWords int64 +} + +// NewBinaryBitBlockCounter constructs a binary bit block counter for +// computing the popcounts on the results of operations between +// the passed in bitmaps, with their respective offsets. 
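+//
+// A minimal sketch of AND-ing two validity bitmaps (all names hypothetical):
+//
+//   counter := NewBinaryBitBlockCounter(left, right, 0, 0, nbits)
+//   for block := counter.NextAndWord(); block.Len > 0; block = counter.NextAndWord() {
+//     bothValid += int64(block.Popcnt)
+//   }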
+func NewBinaryBitBlockCounter(left, right []byte, leftOffset, rightOffset int64, length int64) *BinaryBitBlockCounter { + ret := &BinaryBitBlockCounter{ + left: left[leftOffset/8:], + right: right[rightOffset/8:], + leftOffset: leftOffset % 8, + rightOffset: rightOffset % 8, + bitsRemaining: length, + } + + leftBitsReq := int64(64) + if ret.leftOffset != 0 { + leftBitsReq = 64 + (64 - ret.leftOffset) + } + rightBitsReq := int64(64) + if ret.rightOffset != 0 { + rightBitsReq = 64 + (64 - ret.rightOffset) + } + + if leftBitsReq > rightBitsReq { + ret.bitsRequiredForWords = leftBitsReq + } else { + ret.bitsRequiredForWords = rightBitsReq + } + + return ret +} + +// NextAndWord returns the popcount of the bitwise-and of the next run +// of available bits, up to 64. The returned pair contains the size of +// the run and the number of true values. the last block will have a +// length less than 64 if the bitmap length is not a multiple of 64, +// and will return 0-length blocks in subsequent invocations +func (b *BinaryBitBlockCounter) NextAndWord() BitBlockCount { return b.nextWord(bitBlockAnd) } + +// NextAndNotWord is like NextAndWord but performs x &^ y on each run +func (b *BinaryBitBlockCounter) NextAndNotWord() BitBlockCount { return b.nextWord(bitBlockAndNot) } + +// NextOrWord is like NextAndWord but performs x | y on each run +func (b *BinaryBitBlockCounter) NextOrWord() BitBlockCount { return b.nextWord(bitBlockOr) } + +// NextOrWord is like NextAndWord but performs x | ^y on each run +func (b *BinaryBitBlockCounter) NextOrNotWord() BitBlockCount { return b.nextWord(bitBlockOrNot) } + +func (b *BinaryBitBlockCounter) nextWord(op bitOp) BitBlockCount { + if b.bitsRemaining == 0 { + return BitBlockCount{} + } + + // when offset is >0, we need there to be a word beyond the last + // aligned word in the bitmap for the bit shifting logic + if b.bitsRemaining < b.bitsRequiredForWords { + runLength := int16(b.bitsRemaining) + if runLength > int16(wordBits) { + runLength = int16(wordBits) + } + + var popcount int16 + for i := int16(0); i < runLength; i++ { + if op.bit(bitutil.BitIsSet(b.left, int(b.leftOffset)+int(i)), + bitutil.BitIsSet(b.right, int(b.rightOffset)+int(i))) { + popcount++ + } + } + // this code path should trigger _at most_ 2 times. in the "two times" + // case, the first time the run length will be a multiple of 8. + b.left = b.left[runLength/8:] + b.right = b.right[runLength/8:] + b.bitsRemaining -= int64(runLength) + return BitBlockCount{Len: runLength, Popcnt: popcount} + } + + var popcount int + if b.leftOffset == 0 && b.rightOffset == 0 { + popcount = bits.OnesCount64(op.word(loadWord(b.left), loadWord(b.right))) + } else { + leftWord := shiftWord(loadWord(b.left), loadWord(b.left[8:]), b.leftOffset) + rightWord := shiftWord(loadWord(b.right), loadWord(b.right[8:]), b.rightOffset) + popcount = bits.OnesCount64(op.word(leftWord, rightWord)) + } + b.left = b.left[wordBits/8:] + b.right = b.right[wordBits/8:] + b.bitsRemaining -= wordBits + return BitBlockCount{Len: int16(wordBits), Popcnt: int16(popcount)} +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go new file mode 100644 index 00000000..5ff8d518 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutils + +import ( + "encoding/binary" + "fmt" + "math/bits" + "unsafe" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/internal/utils" +) + +// BitRun represents a run of bits with the same value of length Len +// with Set representing if the group of bits were 1 or 0. +type BitRun struct { + Len int64 + Set bool +} + +// BitRunReader is an interface that is usable by multiple callers to provide +// multiple types of bit run readers such as a reverse reader and so on. +// +// It's a convenience interface for counting contiguous set/unset bits in a bitmap. +// In places where BitBlockCounter can be used, then it would be preferred to use that +// as it would be faster than using BitRunReader. +type BitRunReader interface { + NextRun() BitRun +} + +func (b BitRun) String() string { + return fmt.Sprintf("{Length: %d, set=%t}", b.Len, b.Set) +} + +type bitRunReader struct { + bitmap []byte + pos int64 + length int64 + word uint64 + curRunBitSet bool +} + +// NewBitRunReader returns a reader for the given bitmap, offset and length that +// grabs runs of the same value bit at a time for easy iteration. +func NewBitRunReader(bitmap []byte, offset int64, length int64) BitRunReader { + ret := &bitRunReader{ + bitmap: bitmap[offset/8:], + pos: offset % 8, + length: (offset % 8) + length, + } + + if length == 0 { + return ret + } + + ret.curRunBitSet = bitutil.BitIsNotSet(bitmap, int(offset)) + bitsRemaining := length + ret.pos + ret.loadWord(bitsRemaining) + ret.word = ret.word &^ LeastSignificantBitMask(ret.pos) + return ret +} + +// NextRun returns a new BitRun containing the number of contiguous bits with the +// same value. Len == 0 indicates the end of the bitmap. +func (b *bitRunReader) NextRun() BitRun { + if b.pos >= b.length { + return BitRun{0, false} + } + + // This implementation relies on a efficient implementations of + // CountTrailingZeros and assumes that runs are more often then + // not. The logic is to incrementally find the next bit change + // from the current position. This is done by zeroing all + // bits in word_ up to position_ and using the TrailingZeroCount + // to find the index of the next set bit. + + // The runs alternate on each call, so flip the bit. + b.curRunBitSet = !b.curRunBitSet + + start := b.pos + startOffset := start & 63 + + // Invert the word for proper use of CountTrailingZeros and + // clear bits so CountTrailingZeros can do it magic. + b.word = ^b.word &^ LeastSignificantBitMask(startOffset) + + // Go forward until the next change from unset to set. 
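+ // The low startOffset bits were cleared above and the current run's bits
+ // read as zeros after the inversion, so TrailingZeros64 returns startOffset
+ // plus the number of bits the run still occupies in this word.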
+ newbits := int64(bits.TrailingZeros64(b.word)) - startOffset + b.pos += newbits + + if IsMultipleOf64(b.pos) && b.pos < b.length { + b.advanceUntilChange() + } + return BitRun{b.pos - start, b.curRunBitSet} +} + +func (b *bitRunReader) advanceUntilChange() { + newbits := int64(0) + for { + b.bitmap = b.bitmap[arrow.Uint64SizeBytes:] + b.loadNextWord() + newbits = int64(bits.TrailingZeros64(b.word)) + b.pos += newbits + if !IsMultipleOf64(b.pos) || b.pos >= b.length || newbits <= 0 { + break + } + } +} + +func (b *bitRunReader) loadNextWord() { + b.loadWord(b.length - b.pos) +} + +func (b *bitRunReader) loadWord(bitsRemaining int64) { + b.word = 0 + if bitsRemaining >= 64 { + b.word = binary.LittleEndian.Uint64(b.bitmap) + } else { + nbytes := bitutil.BytesForBits(bitsRemaining) + wordptr := (*(*[8]byte)(unsafe.Pointer(&b.word)))[:] + copy(wordptr, b.bitmap[:nbytes]) + + bitutil.SetBitTo(wordptr, int(bitsRemaining), bitutil.BitIsNotSet(wordptr, int(bitsRemaining-1))) + // reset the value to little endian for big endian architectures + b.word = utils.ToLEUint64(b.word) + } + + // Two cases: + // 1. For unset, CountTrailingZeros works naturally so we don't + // invert the word. + // 2. Otherwise invert so we can use CountTrailingZeros. + if b.curRunBitSet { + b.word = ^b.word + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go new file mode 100644 index 00000000..6369c094 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go @@ -0,0 +1,361 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutils + +import ( + "encoding/binary" + "math/bits" + + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/internal/utils" +) + +// IsMultipleOf64 returns whether v is a multiple of 64. +func IsMultipleOf64(v int64) bool { return v&63 == 0 } + +// LeastSignificantBitMask returns a bit mask to return the least significant +// bits for a value starting from the bit index passed in. ie: if you want a +// mask for the 4 least significant bits, you call LeastSignificantBitMask(4) +func LeastSignificantBitMask(index int64) uint64 { + return (uint64(1) << index) - 1 +} + +// SetBitRun describes a run of contiguous set bits in a bitmap with Pos being +// the starting position of the run and Length being the number of bits. +type SetBitRun struct { + Pos int64 + Length int64 +} + +// AtEnd returns true if this bit run is the end of the set by checking +// that the length is 0. 
+func (s SetBitRun) AtEnd() bool { + return s.Length == 0 +} + +// Equal returns whether rhs is the same run as s +func (s SetBitRun) Equal(rhs SetBitRun) bool { + return s.Pos == rhs.Pos && s.Length == rhs.Length +} + +// SetBitRunReader is an interface for reading groups of contiguous set bits +// from a bitmap. The interface allows us to create different reader implementations +// that share the same interface easily such as a reverse set reader. +type SetBitRunReader interface { + // NextRun will return the next run of contiguous set bits in the bitmap + NextRun() SetBitRun + // Reset allows re-using the reader by providing a new bitmap, offset and length. The arguments + // match the New function for the reader being used. + Reset([]byte, int64, int64) + // VisitSetBitRuns calls visitFn for each set in a loop starting from the current position + // it's roughly equivalent to simply looping, calling NextRun and calling visitFn on the run + // for each run. + VisitSetBitRuns(visitFn VisitFn) error +} + +type baseSetBitRunReader struct { + bitmap []byte + pos int64 + length int64 + remaining int64 + curWord uint64 + curNumBits int32 + reversed bool + + firstBit uint64 +} + +// NewSetBitRunReader returns a SetBitRunReader for the bitmap starting at startOffset which will read +// numvalues bits. +func NewSetBitRunReader(validBits []byte, startOffset, numValues int64) SetBitRunReader { + return newBaseSetBitRunReader(validBits, startOffset, numValues, false) +} + +// NewReverseSetBitRunReader returns a SetBitRunReader like NewSetBitRunReader, except it will +// return runs starting from the end of the bitmap until it reaches startOffset rather than starting +// at startOffset and reading from there. The SetBitRuns will still operate the same, so Pos +// will still be the position of the "left-most" bit of the run or the "start" of the run. It +// just returns runs starting from the end instead of starting from the beginning. 
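+//
+// Sketch of draining a reader (bitmap and callback names hypothetical):
+//
+//   rdr := NewReverseSetBitRunReader(validBits, 0, numValues)
+//   for run := rdr.NextRun(); !run.AtEnd(); run = rdr.NextRun() {
+//     visit(run.Pos, run.Length)
+//   }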
+func NewReverseSetBitRunReader(validBits []byte, startOffset, numValues int64) SetBitRunReader { + return newBaseSetBitRunReader(validBits, startOffset, numValues, true) +} + +func newBaseSetBitRunReader(bitmap []byte, startOffset, length int64, reverse bool) *baseSetBitRunReader { + ret := &baseSetBitRunReader{reversed: reverse} + ret.Reset(bitmap, startOffset, length) + return ret +} + +func (br *baseSetBitRunReader) Reset(bitmap []byte, startOffset, length int64) { + br.bitmap = bitmap + br.length = length + br.remaining = length + br.curNumBits = 0 + br.curWord = 0 + + if !br.reversed { + br.pos = startOffset / 8 + br.firstBit = 1 + + bitOffset := int8(startOffset % 8) + if length > 0 && bitOffset != 0 { + br.curNumBits = int32(utils.MinInt(int(length), int(8-bitOffset))) + br.curWord = br.loadPartial(bitOffset, int64(br.curNumBits)) + } + return + } + + br.pos = (startOffset + length) / 8 + br.firstBit = uint64(0x8000000000000000) + endBitOffset := int8((startOffset + length) % 8) + if length > 0 && endBitOffset != 0 { + br.pos++ + br.curNumBits = int32(utils.MinInt(int(length), int(endBitOffset))) + br.curWord = br.loadPartial(8-endBitOffset, int64(br.curNumBits)) + } +} + +func (br *baseSetBitRunReader) consumeBits(word uint64, nbits int32) uint64 { + if br.reversed { + return word << nbits + } + return word >> nbits +} + +func (br *baseSetBitRunReader) countFirstZeros(word uint64) int32 { + if br.reversed { + return int32(bits.LeadingZeros64(word)) + } + return int32(bits.TrailingZeros64(word)) +} + +func (br *baseSetBitRunReader) loadPartial(bitOffset int8, numBits int64) uint64 { + var word [8]byte + nbytes := bitutil.BytesForBits(numBits) + if br.reversed { + br.pos -= nbytes + copy(word[8-nbytes:], br.bitmap[br.pos:br.pos+nbytes]) + return (binary.LittleEndian.Uint64(word[:]) << bitOffset) &^ LeastSignificantBitMask(64-numBits) + } + + copy(word[:], br.bitmap[br.pos:br.pos+nbytes]) + br.pos += nbytes + return (binary.LittleEndian.Uint64(word[:]) >> bitOffset) & LeastSignificantBitMask(numBits) +} + +func (br *baseSetBitRunReader) findCurrentRun() SetBitRun { + nzeros := br.countFirstZeros(br.curWord) + if nzeros >= br.curNumBits { + br.remaining -= int64(br.curNumBits) + br.curWord = 0 + br.curNumBits = 0 + return SetBitRun{0, 0} + } + + br.curWord = br.consumeBits(br.curWord, nzeros) + br.curNumBits -= nzeros + br.remaining -= int64(nzeros) + pos := br.position() + + numOnes := br.countFirstZeros(^br.curWord) + br.curWord = br.consumeBits(br.curWord, numOnes) + br.curNumBits -= numOnes + br.remaining -= int64(numOnes) + return SetBitRun{pos, int64(numOnes)} +} + +func (br *baseSetBitRunReader) position() int64 { + if br.reversed { + return br.remaining + } + return br.length - br.remaining +} + +func (br *baseSetBitRunReader) adjustRun(run SetBitRun) SetBitRun { + if br.reversed { + run.Pos -= run.Length + } + return run +} + +func (br *baseSetBitRunReader) loadFull() (ret uint64) { + if br.reversed { + br.pos -= 8 + } + ret = binary.LittleEndian.Uint64(br.bitmap[br.pos : br.pos+8]) + if !br.reversed { + br.pos += 8 + } + return +} + +func (br *baseSetBitRunReader) skipNextZeros() { + for br.remaining >= 64 { + br.curWord = br.loadFull() + nzeros := br.countFirstZeros(br.curWord) + if nzeros < 64 { + br.curWord = br.consumeBits(br.curWord, nzeros) + br.curNumBits = 64 - nzeros + br.remaining -= int64(nzeros) + return + } + br.remaining -= 64 + } + // run of zeros continues in last bitmap word + if br.remaining > 0 { + br.curWord = br.loadPartial(0, br.remaining) + 
br.curNumBits = int32(br.remaining) + nzeros := int32(utils.MinInt(int(br.curNumBits), int(br.countFirstZeros(br.curWord)))) + br.curWord = br.consumeBits(br.curWord, nzeros) + br.curNumBits -= nzeros + br.remaining -= int64(nzeros) + } +} + +func (br *baseSetBitRunReader) countNextOnes() int64 { + var length int64 + if ^br.curWord != 0 { + numOnes := br.countFirstZeros(^br.curWord) + br.remaining -= int64(numOnes) + br.curWord = br.consumeBits(br.curWord, numOnes) + br.curNumBits -= numOnes + if br.curNumBits != 0 { + return int64(numOnes) + } + length = int64(numOnes) + } else { + br.remaining -= 64 + br.curNumBits = 0 + length = 64 + } + + for br.remaining >= 64 { + br.curWord = br.loadFull() + numOnes := br.countFirstZeros(^br.curWord) + length += int64(numOnes) + br.remaining -= int64(numOnes) + if numOnes < 64 { + br.curWord = br.consumeBits(br.curWord, numOnes) + br.curNumBits = 64 - numOnes + return length + } + } + + if br.remaining > 0 { + br.curWord = br.loadPartial(0, br.remaining) + br.curNumBits = int32(br.remaining) + numOnes := br.countFirstZeros(^br.curWord) + br.curWord = br.consumeBits(br.curWord, numOnes) + br.curNumBits -= numOnes + br.remaining -= int64(numOnes) + length += int64(numOnes) + } + return length +} + +func (br *baseSetBitRunReader) NextRun() SetBitRun { + var ( + pos int64 = 0 + length int64 = 0 + ) + + if br.curNumBits != 0 { + run := br.findCurrentRun() + if run.Length != 0 && br.curNumBits != 0 { + return br.adjustRun(run) + } + pos = run.Pos + length = run.Length + } + + if length == 0 { + // we didn't get any ones in curWord, so we can skip any zeros + // in the following words + br.skipNextZeros() + if br.remaining == 0 { + return SetBitRun{0, 0} + } + pos = br.position() + } else if br.curNumBits == 0 { + if br.remaining >= 64 { + br.curWord = br.loadFull() + br.curNumBits = 64 + } else if br.remaining > 0 { + br.curWord = br.loadPartial(0, br.remaining) + br.curNumBits = int32(br.remaining) + } else { + return br.adjustRun(SetBitRun{pos, length}) + } + if (br.curWord & br.firstBit) == 0 { + return br.adjustRun(SetBitRun{pos, length}) + } + } + + length += br.countNextOnes() + return br.adjustRun(SetBitRun{pos, length}) +} + +// VisitFn is a callback function for visiting runs of contiguous bits +type VisitFn func(pos int64, length int64) error + +func (br *baseSetBitRunReader) VisitSetBitRuns(visitFn VisitFn) error { + for { + run := br.NextRun() + if run.Length == 0 { + break + } + + if err := visitFn(run.Pos, run.Length); err != nil { + return err + } + } + return nil +} + +// VisitSetBitRuns is just a convenience function for calling NewSetBitRunReader and then VisitSetBitRuns +func VisitSetBitRuns(bitmap []byte, bitmapOffset int64, length int64, visitFn VisitFn) error { + if bitmap == nil { + return visitFn(0, length) + } + rdr := NewSetBitRunReader(bitmap, bitmapOffset, length) + for { + run := rdr.NextRun() + if run.Length == 0 { + break + } + + if err := visitFn(run.Pos, run.Length); err != nil { + return err + } + } + return nil +} + +func VisitSetBitRunsNoErr(bitmap []byte, bitmapOffset int64, length int64, visitFn func(pos, length int64)) { + if bitmap == nil { + visitFn(0, length) + return + } + rdr := NewSetBitRunReader(bitmap, bitmapOffset, length) + for { + run := rdr.NextRun() + if run.Length == 0 { + break + } + visitFn(run.Pos, run.Length) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go new file mode 100644 
index 00000000..1428e49e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitutils + +import "github.com/apache/arrow/go/v12/arrow/bitutil" + +// GenerateBits writes sequential bits to a bitmap. Bits preceding the +// initial start offset are preserved, bits following the bitmap may +// get clobbered. +func GenerateBits(bitmap []byte, start, length int64, g func() bool) { + if length == 0 { + return + } + + cur := bitmap[start/8:] + mask := bitutil.BitMask[start%8] + curbyte := cur[0] & bitutil.PrecedingBitmask[start%8] + + for i := int64(0); i < length; i++ { + bit := g() + if bit { + curbyte = curbyte | mask + } + mask <<= 1 + if mask == 0 { + mask = 1 + cur[0] = curbyte + cur = cur[1:] + curbyte = 0 + } + } + + if mask != 1 { + cur[0] = curbyte + } +} + +// GenerateBitsUnrolled is like GenerateBits but unrolls its main loop for +// higher performance. +// +// See the benchmarks for evidence. +func GenerateBitsUnrolled(bitmap []byte, start, length int64, g func() bool) { + if length == 0 { + return + } + + var ( + curbyte byte + cur = bitmap[start/8:] + startBitOffset uint64 = uint64(start % 8) + mask = bitutil.BitMask[startBitOffset] + remaining = length + ) + + if mask != 0x01 { + curbyte = cur[0] & bitutil.PrecedingBitmask[startBitOffset] + for mask != 0 && remaining > 0 { + if g() { + curbyte |= mask + } + mask <<= 1 + remaining-- + } + cur[0] = curbyte + cur = cur[1:] + } + + var outResults [8]byte + for remainingBytes := remaining / 8; remainingBytes > 0; remainingBytes-- { + for i := 0; i < 8; i++ { + if g() { + outResults[i] = 1 + } else { + outResults[i] = 0 + } + } + cur[0] = (outResults[0] | outResults[1]<<1 | outResults[2]<<2 | + outResults[3]<<3 | outResults[4]<<4 | outResults[5]<<5 | + outResults[6]<<6 | outResults[7]<<7) + cur = cur[1:] + } + + remainingBits := remaining % 8 + if remainingBits > 0 { + curbyte = 0 + mask = 0x01 + for ; remainingBits > 0; remainingBits-- { + if g() { + curbyte |= mask + } + mask <<= 1 + } + cur[0] = curbyte + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_funcs.go b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_funcs.go new file mode 100644 index 00000000..1a859198 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_funcs.go @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashing + +import ( + "math/bits" + "unsafe" + + "github.com/zeebo/xxh3" +) + +func hashInt(val uint64, alg uint64) uint64 { + // Two of xxhash's prime multipliers (which are chosen for their + // bit dispersion properties) + var multipliers = [2]uint64{11400714785074694791, 14029467366897019727} + // Multiplying by the prime number mixes the low bits into the high bits, + // then byte-swapping (which is a single CPU instruction) allows the + // combined high and low bits to participate in the initial hash table index. + return bits.ReverseBytes64(multipliers[alg] * val) +} + +func hashFloat32(val float32, alg uint64) uint64 { + // grab the raw byte pattern of the + bt := *(*[4]byte)(unsafe.Pointer(&val)) + x := uint64(*(*uint32)(unsafe.Pointer(&bt[0]))) + hx := hashInt(x, alg) + hy := hashInt(x, alg^1) + return 4 ^ hx ^ hy +} + +func hashFloat64(val float64, alg uint64) uint64 { + bt := *(*[8]byte)(unsafe.Pointer(&val)) + hx := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[4]))), alg) + hy := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[0]))), alg^1) + return 8 ^ hx ^ hy +} + +// prime constants used for slightly increasing the hash quality further +var exprimes = [2]uint64{1609587929392839161, 9650029242287828579} + +// for smaller amounts of bytes this is faster than even calling into +// xxh3 to do the hash, so we specialize in order to get the benefits +// of that performance. +func hash(b []byte, alg uint64) uint64 { + n := uint32(len(b)) + if n <= 16 { + switch { + case n > 8: + // 8 < length <= 16 + // apply same principle as above, but as two 64-bit ints + x := *(*uint64)(unsafe.Pointer(&b[n-8])) + y := *(*uint64)(unsafe.Pointer(&b[0])) + hx := hashInt(x, alg) + hy := hashInt(y, alg^1) + return uint64(n) ^ hx ^ hy + case n >= 4: + // 4 < length <= 8 + // we can read the bytes as two overlapping 32-bit ints, apply different + // hash functions to each in parallel + // then xor the results + x := *(*uint32)(unsafe.Pointer(&b[n-4])) + y := *(*uint32)(unsafe.Pointer(&b[0])) + hx := hashInt(uint64(x), alg) + hy := hashInt(uint64(y), alg^1) + return uint64(n) ^ hx ^ hy + case n > 0: + x := uint32((n << 24) ^ (uint32(b[0]) << 16) ^ (uint32(b[n/2]) << 8) ^ uint32(b[n-1])) + return hashInt(uint64(x), alg) + case n == 0: + return 1 + } + } + + // increase differentiation enough to improve hash quality + return xxh3.Hash(b) + exprimes[alg] +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string.go b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string.go new file mode 100644 index 00000000..6630010b --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string.go @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.20 + +package hashing + +import "unsafe" + +func hashString(val string, alg uint64) uint64 { + buf := unsafe.Slice(unsafe.StringData(val), len(val)) + return hash(buf, alg) +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string_go1.19.go b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string_go1.19.go new file mode 100644 index 00000000..8a799062 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/hash_string_go1.19.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.20 + +package hashing + +import ( + "reflect" + "unsafe" +) + +func hashString(val string, alg uint64) uint64 { + buf := *(*[]byte)(unsafe.Pointer(&val)) + (*reflect.SliceHeader)(unsafe.Pointer(&buf)).Cap = len(val) + return hash(buf, alg) +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/types.tmpldata b/vendor/github.com/apache/arrow/go/v12/internal/hashing/types.tmpldata new file mode 100644 index 00000000..0ba6f765 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/types.tmpldata @@ -0,0 +1,42 @@ +[ + { + "Name": "Int8", + "name": "int8" + }, + { + "Name": "Uint8", + "name": "uint8" + }, + { + "Name": "Int16", + "name": "int16" + }, + { + "Name": "Uint16", + "name": "uint16" + }, + { + "Name": "Int32", + "name": "int32" + }, + { + "Name": "Int64", + "name": "int64" + }, + { + "Name": "Uint32", + "name": "uint32" + }, + { + "Name": "Uint64", + "name": "uint64" + }, + { + "Name": "Float32", + "name": "float32" + }, + { + "Name": "Float64", + "name": "float64" + } +] diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go new file mode 100644 index 00000000..1ab3bf22 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go @@ -0,0 +1,2783 @@ +// Code generated by xxh3_memo_table.gen.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashing + +import ( + "math" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/internal/utils" +) + +type payloadInt8 struct { + val int8 + memoIdx int32 +} + +type entryInt8 struct { + h uint64 + payload payloadInt8 +} + +func (e entryInt8) Valid() bool { return e.h != sentinel } + +// Int8HashTable is a hashtable specifically for int8 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Int8HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryInt8 +} + +// NewInt8HashTable returns a new hash table for int8 values +// initialized with the passed in capacity or 32 whichever is larger. +func NewInt8HashTable(cap uint64) *Int8HashTable { + initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + ret := &Int8HashTable{cap: initCap, capMask: initCap - 1, size: 0} + ret.entries = make([]entryInt8, initCap) + return ret +} + +// Reset drops all of the values in this hash table and re-initializes it +// with the specified initial capacity as if by calling New, but without having +// to reallocate the object. +func (h *Int8HashTable) Reset(cap uint64) { + h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + h.capMask = h.cap - 1 + h.size = 0 + h.entries = make([]entryInt8, h.cap) +} + +// CopyValues is used for copying the values out of the hash table into the +// passed in slice, in the order that they were first inserted +func (h *Int8HashTable) CopyValues(out []int8) { + h.CopyValuesSubset(0, out) +} + +// CopyValuesSubset copies a subset of the values in the hashtable out, starting +// with the value at start, in the order that they were inserted. +func (h *Int8HashTable) CopyValuesSubset(start int, out []int8) { + h.VisitEntries(func(e *entryInt8) { + idx := e.payload.memoIdx - int32(start) + if idx >= 0 { + out[idx] = e.payload.val + } + }) +} + +func (h *Int8HashTable) WriteOut(out []byte) { + h.WriteOutSubset(0, out) +} + +func (h *Int8HashTable) WriteOutSubset(start int, out []byte) { + data := arrow.Int8Traits.CastFromBytes(out) + h.VisitEntries(func(e *entryInt8) { + idx := e.payload.memoIdx - int32(start) + if idx >= 0 { + data[idx] = e.payload.val + } + }) +} + +func (h *Int8HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap } + +func (Int8HashTable) fixHash(v uint64) uint64 { + if v == sentinel { + return 42 + } + return v +} + +// Lookup retrieves the entry for a given hash value assuming it's payload value returns +// true when passed to the cmp func. Returns a pointer to the entry for the given hash value, +// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false. 
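+//
+// Lookup pairs with Insert for a get-or-insert flow, mirroring GetOrInsert
+// below (hash and value names hypothetical):
+//
+//   e, ok := h.Lookup(hv, func(v int8) bool { return v == val })
+//   if !ok {
+//     h.Insert(e, hv, val, int32(nextIdx))
+//   }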
+func (h *Int8HashTable) Lookup(v uint64, cmp func(int8) bool) (*entryInt8, bool) { + idx, ok := h.lookup(v, h.capMask, cmp) + return &h.entries[idx], ok +} + +func (h *Int8HashTable) lookup(v uint64, szMask uint64, cmp func(int8) bool) (uint64, bool) { + const perturbShift uint8 = 5 + + var ( + idx uint64 + perturb uint64 + e *entryInt8 + ) + + v = h.fixHash(v) + idx = v & szMask + perturb = (v >> uint64(perturbShift)) + 1 + + for { + e = &h.entries[idx] + if e.h == v && cmp(e.payload.val) { + return idx, true + } + + if e.h == sentinel { + return idx, false + } + + // perturbation logic inspired from CPython's set/dict object + // the goal is that all 64 bits of unmasked hash value eventually + // participate int he probing sequence, to minimize clustering + idx = (idx + perturb) & szMask + perturb = (perturb >> uint64(perturbShift)) + 1 + } +} + +func (h *Int8HashTable) upsize(newcap uint64) error { + newMask := newcap - 1 + + oldEntries := h.entries + h.entries = make([]entryInt8, newcap) + for _, e := range oldEntries { + if e.Valid() { + idx, _ := h.lookup(e.h, newMask, func(int8) bool { return false }) + h.entries[idx] = e + } + } + h.cap = newcap + h.capMask = newMask + return nil +} + +// Insert updates the given entry with the provided hash value, payload value and memo index. +// The entry pointer must have been retrieved via lookup in order to actually insert properly. +func (h *Int8HashTable) Insert(e *entryInt8, v uint64, val int8, memoIdx int32) error { + e.h = h.fixHash(v) + e.payload.val = val + e.payload.memoIdx = memoIdx + h.size++ + + if h.needUpsize() { + h.upsize(h.cap * uint64(loadFactor) * 2) + } + return nil +} + +// VisitEntries will call the passed in function on each *valid* entry in the hash table, +// a valid entry being one which has had a value inserted into it. +func (h *Int8HashTable) VisitEntries(visit func(*entryInt8)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Int8MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Int8MemoTable struct { + tbl *Int8HashTable + nullIdx int32 +} + +// NewInt8MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewInt8MemoTable(num int64) *Int8MemoTable { + return &Int8MemoTable{tbl: NewInt8HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Int8MemoTable) TypeTraits() TypeTraits { + return arrow.Int8Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Int8MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Int8MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Int8MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. 
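+//
+// The null slot is tracked outside the hash table itself: only its memo
+// index is recorded, which is why Size adds one when a null is present.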
+func (s *Int8MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Int8MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Int8MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]int8)) +} + +func (s *Int8MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Int8Traits.CastFromBytes(out)) +} + +func (s *Int8MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Int8Traits.CastFromBytes(out)) +} + +func (s *Int8MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Int8MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Int8MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(int8)), 0) + if e, ok := s.tbl.Lookup(h, func(v int8) bool { return val.(int8) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Int8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + h := hashInt(uint64(val.(int8)), 0) + e, ok := s.tbl.Lookup(h, func(v int8) bool { + return val.(int8) == v + }) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(int8), int32(idx)) + } + return +} + +type payloadUint8 struct { + val uint8 + memoIdx int32 +} + +type entryUint8 struct { + h uint64 + payload payloadUint8 +} + +func (e entryUint8) Valid() bool { return e.h != sentinel } + +// Uint8HashTable is a hashtable specifically for uint8 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Uint8HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryUint8 +} + +// NewUint8HashTable returns a new hash table for uint8 values +// initialized with the passed in capacity or 32 whichever is larger. +func NewUint8HashTable(cap uint64) *Uint8HashTable { + initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + ret := &Uint8HashTable{cap: initCap, capMask: initCap - 1, size: 0} + ret.entries = make([]entryUint8, initCap) + return ret +} + +// Reset drops all of the values in this hash table and re-initializes it +// with the specified initial capacity as if by calling New, but without having +// to reallocate the object. 
+func (h *Uint8HashTable) Reset(cap uint64) { + h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + h.capMask = h.cap - 1 + h.size = 0 + h.entries = make([]entryUint8, h.cap) +} + +// CopyValues is used for copying the values out of the hash table into the +// passed in slice, in the order that they were first inserted +func (h *Uint8HashTable) CopyValues(out []uint8) { + h.CopyValuesSubset(0, out) +} + +// CopyValuesSubset copies a subset of the values in the hashtable out, starting +// with the value at start, in the order that they were inserted. +func (h *Uint8HashTable) CopyValuesSubset(start int, out []uint8) { + h.VisitEntries(func(e *entryUint8) { + idx := e.payload.memoIdx - int32(start) + if idx >= 0 { + out[idx] = e.payload.val + } + }) +} + +func (h *Uint8HashTable) WriteOut(out []byte) { + h.WriteOutSubset(0, out) +} + +func (h *Uint8HashTable) WriteOutSubset(start int, out []byte) { + data := arrow.Uint8Traits.CastFromBytes(out) + h.VisitEntries(func(e *entryUint8) { + idx := e.payload.memoIdx - int32(start) + if idx >= 0 { + data[idx] = e.payload.val + } + }) +} + +func (h *Uint8HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap } + +func (Uint8HashTable) fixHash(v uint64) uint64 { + if v == sentinel { + return 42 + } + return v +} + +// Lookup retrieves the entry for a given hash value assuming it's payload value returns +// true when passed to the cmp func. Returns a pointer to the entry for the given hash value, +// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false. +func (h *Uint8HashTable) Lookup(v uint64, cmp func(uint8) bool) (*entryUint8, bool) { + idx, ok := h.lookup(v, h.capMask, cmp) + return &h.entries[idx], ok +} + +func (h *Uint8HashTable) lookup(v uint64, szMask uint64, cmp func(uint8) bool) (uint64, bool) { + const perturbShift uint8 = 5 + + var ( + idx uint64 + perturb uint64 + e *entryUint8 + ) + + v = h.fixHash(v) + idx = v & szMask + perturb = (v >> uint64(perturbShift)) + 1 + + for { + e = &h.entries[idx] + if e.h == v && cmp(e.payload.val) { + return idx, true + } + + if e.h == sentinel { + return idx, false + } + + // perturbation logic inspired from CPython's set/dict object + // the goal is that all 64 bits of unmasked hash value eventually + // participate int he probing sequence, to minimize clustering + idx = (idx + perturb) & szMask + perturb = (perturb >> uint64(perturbShift)) + 1 + } +} + +func (h *Uint8HashTable) upsize(newcap uint64) error { + newMask := newcap - 1 + + oldEntries := h.entries + h.entries = make([]entryUint8, newcap) + for _, e := range oldEntries { + if e.Valid() { + idx, _ := h.lookup(e.h, newMask, func(uint8) bool { return false }) + h.entries[idx] = e + } + } + h.cap = newcap + h.capMask = newMask + return nil +} + +// Insert updates the given entry with the provided hash value, payload value and memo index. +// The entry pointer must have been retrieved via lookup in order to actually insert properly. +func (h *Uint8HashTable) Insert(e *entryUint8, v uint64, val uint8, memoIdx int32) error { + e.h = h.fixHash(v) + e.payload.val = val + e.payload.memoIdx = memoIdx + h.size++ + + if h.needUpsize() { + h.upsize(h.cap * uint64(loadFactor) * 2) + } + return nil +} + +// VisitEntries will call the passed in function on each *valid* entry in the hash table, +// a valid entry being one which has had a value inserted into it. 
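+//
+// Note that visit receives a pointer to the range loop's copy of the entry,
+// so mutations made through it do not alter the table.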
+func (h *Uint8HashTable) VisitEntries(visit func(*entryUint8)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Uint8MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Uint8MemoTable struct { + tbl *Uint8HashTable + nullIdx int32 +} + +// NewUint8MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewUint8MemoTable(num int64) *Uint8MemoTable { + return &Uint8MemoTable{tbl: NewUint8HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Uint8MemoTable) TypeTraits() TypeTraits { + return arrow.Uint8Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Uint8MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Uint8MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Uint8MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *Uint8MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Uint8MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Uint8MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]uint8)) +} + +func (s *Uint8MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Uint8Traits.CastFromBytes(out)) +} + +func (s *Uint8MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Uint8Traits.CastFromBytes(out)) +} + +func (s *Uint8MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Uint8MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Uint8MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(uint8)), 0) + if e, ok := s.tbl.Lookup(h, func(v uint8) bool { return val.(uint8) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). 
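+//
+// An illustrative sketch of dictionary-encoding a stream of values (the
+// variable names are hypothetical, not part of the upstream file):
+//
+//	memo := NewUint8MemoTable(0)
+//	var indices []int
+//	for _, v := range []uint8{3, 7, 3} {
+//		idx, _, _ := memo.GetOrInsert(v)
+//		indices = append(indices, idx)
+//	}
+//	// indices == [0, 1, 0]; memo.Size() == 2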
+func (s *Uint8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+	h := hashInt(uint64(val.(uint8)), 0)
+	e, ok := s.tbl.Lookup(h, func(v uint8) bool {
+		return val.(uint8) == v
+	})
+
+	if ok {
+		idx = int(e.payload.memoIdx)
+		found = true
+	} else {
+		idx = s.Size()
+		s.tbl.Insert(e, h, val.(uint8), int32(idx))
+	}
+	return
+}
+
+type payloadInt16 struct {
+	val     int16
+	memoIdx int32
+}
+
+type entryInt16 struct {
+	h       uint64
+	payload payloadInt16
+}
+
+func (e entryInt16) Valid() bool { return e.h != sentinel }
+
+// Int16HashTable is a hashtable specifically for int16 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Int16HashTable struct {
+	cap     uint64
+	capMask uint64
+	size    uint64
+
+	entries []entryInt16
+}
+
+// NewInt16HashTable returns a new hash table for int16 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewInt16HashTable(cap uint64) *Int16HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &Int16HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entryInt16, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Int16HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryInt16, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Int16HashTable) CopyValues(out []int16) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Int16HashTable) CopyValuesSubset(start int, out []int16) {
+	h.VisitEntries(func(e *entryInt16) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Int16HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Int16HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Int16Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryInt16) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEInt16(e.payload.val)
+		}
+	})
+}
+
+func (h *Int16HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Int16HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
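+//
+// As an illustrative sketch of the collision-probing scheme used by the
+// unexported lookup (assuming a 32-slot table, i.e. a mask of 31):
+//
+//	idx := h & 31           // initial slot from the low hash bits
+//	perturb := (h >> 5) + 1
+//	// on each collision:
+//	idx = (idx + perturb) & 31
+//	perturb = (perturb >> 5) + 1
+//
+// so the upper bits of the hash gradually feed into the probe sequence.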
+func (h *Int16HashTable) Lookup(v uint64, cmp func(int16) bool) (*entryInt16, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Int16HashTable) lookup(v uint64, szMask uint64, cmp func(int16) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryInt16
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Int16HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryInt16, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(int16) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Int16HashTable) Insert(e *entryInt16, v uint64, val int16, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Int16HashTable) VisitEntries(visit func(*entryInt16)) {
+	for _, e := range h.entries {
+		if e.Valid() {
+			visit(&e)
+		}
+	}
+}
+
+// Int16MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Int16MemoTable struct {
+	tbl     *Int16HashTable
+	nullIdx int32
+}
+
+// NewInt16MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewInt16MemoTable(num int64) *Int16MemoTable {
+	return &Int16MemoTable{tbl: NewInt16HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Int16MemoTable) TypeTraits() TypeTraits {
+	return arrow.Int16Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Int16MemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Int16MemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Int16MemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
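+//
+// An illustrative sketch (hypothetical values): null occupies a dictionary
+// index of its own rather than an entry in the hash table:
+//
+//	memo := NewInt16MemoTable(0)
+//	memo.GetOrInsert(int16(5))           // idx 0
+//	idx, found := memo.GetOrInsertNull() // idx 1, found == false
+//	idx, found = memo.GetOrInsertNull()  // idx 1, found == true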
+func (s *Int16MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Int16MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Int16MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]int16)) +} + +func (s *Int16MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Int16Traits.CastFromBytes(out)) +} + +func (s *Int16MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Int16Traits.CastFromBytes(out)) +} + +func (s *Int16MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Int16MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Int16MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(int16)), 0) + if e, ok := s.tbl.Lookup(h, func(v int16) bool { return val.(int16) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Int16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + h := hashInt(uint64(val.(int16)), 0) + e, ok := s.tbl.Lookup(h, func(v int16) bool { + return val.(int16) == v + }) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(int16), int32(idx)) + } + return +} + +type payloadUint16 struct { + val uint16 + memoIdx int32 +} + +type entryUint16 struct { + h uint64 + payload payloadUint16 +} + +func (e entryUint16) Valid() bool { return e.h != sentinel } + +// Uint16HashTable is a hashtable specifically for uint16 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Uint16HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryUint16 +} + +// NewUint16HashTable returns a new hash table for uint16 values +// initialized with the passed in capacity or 32 whichever is larger. +func NewUint16HashTable(cap uint64) *Uint16HashTable { + initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + ret := &Uint16HashTable{cap: initCap, capMask: initCap - 1, size: 0} + ret.entries = make([]entryUint16, initCap) + return ret +} + +// Reset drops all of the values in this hash table and re-initializes it +// with the specified initial capacity as if by calling New, but without having +// to reallocate the object. 
+func (h *Uint16HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryUint16, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Uint16HashTable) CopyValues(out []uint16) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Uint16HashTable) CopyValuesSubset(start int, out []uint16) {
+	h.VisitEntries(func(e *entryUint16) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Uint16HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Uint16HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Uint16Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryUint16) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEUint16(e.payload.val)
+		}
+	})
+}
+
+func (h *Uint16HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Uint16HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Uint16HashTable) Lookup(v uint64, cmp func(uint16) bool) (*entryUint16, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Uint16HashTable) lookup(v uint64, szMask uint64, cmp func(uint16) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryUint16
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Uint16HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryUint16, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(uint16) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Uint16HashTable) Insert(e *entryUint16, v uint64, val uint16, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Uint16HashTable) VisitEntries(visit func(*entryUint16)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Uint16MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Uint16MemoTable struct { + tbl *Uint16HashTable + nullIdx int32 +} + +// NewUint16MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewUint16MemoTable(num int64) *Uint16MemoTable { + return &Uint16MemoTable{tbl: NewUint16HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Uint16MemoTable) TypeTraits() TypeTraits { + return arrow.Uint16Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Uint16MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Uint16MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Uint16MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *Uint16MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Uint16MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Uint16MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]uint16)) +} + +func (s *Uint16MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Uint16Traits.CastFromBytes(out)) +} + +func (s *Uint16MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Uint16Traits.CastFromBytes(out)) +} + +func (s *Uint16MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Uint16MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Uint16MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(uint16)), 0) + if e, ok := s.tbl.Lookup(h, func(v uint16) bool { return val.(uint16) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). 
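+//
+// For example (an illustrative sketch; data is a hypothetical []uint16), the
+// found flag makes counting distinct values straightforward:
+//
+//	memo := NewUint16MemoTable(0)
+//	distinct := 0
+//	for _, v := range data {
+//		if _, found, _ := memo.GetOrInsert(v); !found {
+//			distinct++
+//		}
+//	}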
+func (s *Uint16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+	h := hashInt(uint64(val.(uint16)), 0)
+	e, ok := s.tbl.Lookup(h, func(v uint16) bool {
+		return val.(uint16) == v
+	})
+
+	if ok {
+		idx = int(e.payload.memoIdx)
+		found = true
+	} else {
+		idx = s.Size()
+		s.tbl.Insert(e, h, val.(uint16), int32(idx))
+	}
+	return
+}
+
+type payloadInt32 struct {
+	val     int32
+	memoIdx int32
+}
+
+type entryInt32 struct {
+	h       uint64
+	payload payloadInt32
+}
+
+func (e entryInt32) Valid() bool { return e.h != sentinel }
+
+// Int32HashTable is a hashtable specifically for int32 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Int32HashTable struct {
+	cap     uint64
+	capMask uint64
+	size    uint64
+
+	entries []entryInt32
+}
+
+// NewInt32HashTable returns a new hash table for int32 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewInt32HashTable(cap uint64) *Int32HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &Int32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entryInt32, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Int32HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryInt32, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Int32HashTable) CopyValues(out []int32) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Int32HashTable) CopyValuesSubset(start int, out []int32) {
+	h.VisitEntries(func(e *entryInt32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Int32HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Int32HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Int32Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryInt32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEInt32(e.payload.val)
+		}
+	})
+}
+
+func (h *Int32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Int32HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Int32HashTable) Lookup(v uint64, cmp func(int32) bool) (*entryInt32, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Int32HashTable) lookup(v uint64, szMask uint64, cmp func(int32) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryInt32
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Int32HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryInt32, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(int32) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Int32HashTable) Insert(e *entryInt32, v uint64, val int32, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Int32HashTable) VisitEntries(visit func(*entryInt32)) {
+	for _, e := range h.entries {
+		if e.Valid() {
+			visit(&e)
+		}
+	}
+}
+
+// Int32MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Int32MemoTable struct {
+	tbl     *Int32HashTable
+	nullIdx int32
+}
+
+// NewInt32MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewInt32MemoTable(num int64) *Int32MemoTable {
+	return &Int32MemoTable{tbl: NewInt32HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Int32MemoTable) TypeTraits() TypeTraits {
+	return arrow.Int32Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Int32MemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Int32MemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Int32MemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Int32MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Int32MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Int32MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]int32)) +} + +func (s *Int32MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Int32Traits.CastFromBytes(out)) +} + +func (s *Int32MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Int32Traits.CastFromBytes(out)) +} + +func (s *Int32MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Int32MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Int32MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(int32)), 0) + if e, ok := s.tbl.Lookup(h, func(v int32) bool { return val.(int32) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Int32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + h := hashInt(uint64(val.(int32)), 0) + e, ok := s.tbl.Lookup(h, func(v int32) bool { + return val.(int32) == v + }) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(int32), int32(idx)) + } + return +} + +type payloadInt64 struct { + val int64 + memoIdx int32 +} + +type entryInt64 struct { + h uint64 + payload payloadInt64 +} + +func (e entryInt64) Valid() bool { return e.h != sentinel } + +// Int64HashTable is a hashtable specifically for int64 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Int64HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryInt64 +} + +// NewInt64HashTable returns a new hash table for int64 values +// initialized with the passed in capacity or 32 whichever is larger. +func NewInt64HashTable(cap uint64) *Int64HashTable { + initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + ret := &Int64HashTable{cap: initCap, capMask: initCap - 1, size: 0} + ret.entries = make([]entryInt64, initCap) + return ret +} + +// Reset drops all of the values in this hash table and re-initializes it +// with the specified initial capacity as if by calling New, but without having +// to reallocate the object. 
+func (h *Int64HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryInt64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Int64HashTable) CopyValues(out []int64) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Int64HashTable) CopyValuesSubset(start int, out []int64) {
+	h.VisitEntries(func(e *entryInt64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Int64HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Int64HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Int64Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryInt64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEInt64(e.payload.val)
+		}
+	})
+}
+
+func (h *Int64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Int64HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Int64HashTable) Lookup(v uint64, cmp func(int64) bool) (*entryInt64, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Int64HashTable) lookup(v uint64, szMask uint64, cmp func(int64) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryInt64
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Int64HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryInt64, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(int64) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Int64HashTable) Insert(e *entryInt64, v uint64, val int64, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Int64HashTable) VisitEntries(visit func(*entryInt64)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Int64MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Int64MemoTable struct { + tbl *Int64HashTable + nullIdx int32 +} + +// NewInt64MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewInt64MemoTable(num int64) *Int64MemoTable { + return &Int64MemoTable{tbl: NewInt64HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Int64MemoTable) TypeTraits() TypeTraits { + return arrow.Int64Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Int64MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Int64MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Int64MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *Int64MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Int64MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Int64MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]int64)) +} + +func (s *Int64MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Int64Traits.CastFromBytes(out)) +} + +func (s *Int64MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Int64Traits.CastFromBytes(out)) +} + +func (s *Int64MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Int64MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Int64MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(int64)), 0) + if e, ok := s.tbl.Lookup(h, func(v int64) bool { return val.(int64) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). 
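+//
+// Unlike Get, GetOrInsert never reports KeyNotFound; an illustrative sketch:
+//
+//	memo := NewInt64MemoTable(0)
+//	if _, ok := memo.Get(int64(42)); !ok {
+//		// not present; Get performs no insertion
+//	}
+//	idx, found, _ := memo.GetOrInsert(int64(42)) // inserts: idx == 0, found == false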
+func (s *Int64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+	h := hashInt(uint64(val.(int64)), 0)
+	e, ok := s.tbl.Lookup(h, func(v int64) bool {
+		return val.(int64) == v
+	})
+
+	if ok {
+		idx = int(e.payload.memoIdx)
+		found = true
+	} else {
+		idx = s.Size()
+		s.tbl.Insert(e, h, val.(int64), int32(idx))
+	}
+	return
+}
+
+type payloadUint32 struct {
+	val     uint32
+	memoIdx int32
+}
+
+type entryUint32 struct {
+	h       uint64
+	payload payloadUint32
+}
+
+func (e entryUint32) Valid() bool { return e.h != sentinel }
+
+// Uint32HashTable is a hashtable specifically for uint32 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Uint32HashTable struct {
+	cap     uint64
+	capMask uint64
+	size    uint64
+
+	entries []entryUint32
+}
+
+// NewUint32HashTable returns a new hash table for uint32 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewUint32HashTable(cap uint64) *Uint32HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &Uint32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entryUint32, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Uint32HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryUint32, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Uint32HashTable) CopyValues(out []uint32) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Uint32HashTable) CopyValuesSubset(start int, out []uint32) {
+	h.VisitEntries(func(e *entryUint32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Uint32HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Uint32HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Uint32Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryUint32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEUint32(e.payload.val)
+		}
+	})
+}
+
+func (h *Uint32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Uint32HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Uint32HashTable) Lookup(v uint64, cmp func(uint32) bool) (*entryUint32, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Uint32HashTable) lookup(v uint64, szMask uint64, cmp func(uint32) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryUint32
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Uint32HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryUint32, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(uint32) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Uint32HashTable) Insert(e *entryUint32, v uint64, val uint32, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Uint32HashTable) VisitEntries(visit func(*entryUint32)) {
+	for _, e := range h.entries {
+		if e.Valid() {
+			visit(&e)
+		}
+	}
+}
+
+// Uint32MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Uint32MemoTable struct {
+	tbl     *Uint32HashTable
+	nullIdx int32
+}
+
+// NewUint32MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewUint32MemoTable(num int64) *Uint32MemoTable {
+	return &Uint32MemoTable{tbl: NewUint32HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Uint32MemoTable) TypeTraits() TypeTraits {
+	return arrow.Uint32Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Uint32MemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Uint32MemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Uint32MemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Uint32MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Uint32MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Uint32MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]uint32)) +} + +func (s *Uint32MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Uint32Traits.CastFromBytes(out)) +} + +func (s *Uint32MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Uint32Traits.CastFromBytes(out)) +} + +func (s *Uint32MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Uint32MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Uint32MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(uint32)), 0) + if e, ok := s.tbl.Lookup(h, func(v uint32) bool { return val.(uint32) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Uint32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + h := hashInt(uint64(val.(uint32)), 0) + e, ok := s.tbl.Lookup(h, func(v uint32) bool { + return val.(uint32) == v + }) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(uint32), int32(idx)) + } + return +} + +type payloadUint64 struct { + val uint64 + memoIdx int32 +} + +type entryUint64 struct { + h uint64 + payload payloadUint64 +} + +func (e entryUint64) Valid() bool { return e.h != sentinel } + +// Uint64HashTable is a hashtable specifically for uint64 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Uint64HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryUint64 +} + +// NewUint64HashTable returns a new hash table for uint64 values +// initialized with the passed in capacity or 32 whichever is larger. +func NewUint64HashTable(cap uint64) *Uint64HashTable { + initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32)))) + ret := &Uint64HashTable{cap: initCap, capMask: initCap - 1, size: 0} + ret.entries = make([]entryUint64, initCap) + return ret +} + +// Reset drops all of the values in this hash table and re-initializes it +// with the specified initial capacity as if by calling New, but without having +// to reallocate the object. 
+func (h *Uint64HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryUint64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Uint64HashTable) CopyValues(out []uint64) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Uint64HashTable) CopyValuesSubset(start int, out []uint64) {
+	h.VisitEntries(func(e *entryUint64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Uint64HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Uint64HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Uint64Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryUint64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEUint64(e.payload.val)
+		}
+	})
+}
+
+func (h *Uint64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Uint64HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Uint64HashTable) Lookup(v uint64, cmp func(uint64) bool) (*entryUint64, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Uint64HashTable) lookup(v uint64, szMask uint64, cmp func(uint64) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryUint64
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Uint64HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryUint64, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(uint64) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Uint64HashTable) Insert(e *entryUint64, v uint64, val uint64, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Uint64HashTable) VisitEntries(visit func(*entryUint64)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Uint64MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Uint64MemoTable struct { + tbl *Uint64HashTable + nullIdx int32 +} + +// NewUint64MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewUint64MemoTable(num int64) *Uint64MemoTable { + return &Uint64MemoTable{tbl: NewUint64HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Uint64MemoTable) TypeTraits() TypeTraits { + return arrow.Uint64Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Uint64MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Uint64MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Uint64MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *Uint64MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Uint64MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Uint64MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]uint64)) +} + +func (s *Uint64MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Uint64Traits.CastFromBytes(out)) +} + +func (s *Uint64MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Uint64Traits.CastFromBytes(out)) +} + +func (s *Uint64MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Uint64MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Uint64MemoTable) Get(val interface{}) (int, bool) { + + h := hashInt(uint64(val.(uint64)), 0) + if e, ok := s.tbl.Lookup(h, func(v uint64) bool { return val.(uint64) == v }); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). 
+func (s *Uint64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+	h := hashInt(uint64(val.(uint64)), 0)
+	e, ok := s.tbl.Lookup(h, func(v uint64) bool {
+		return val.(uint64) == v
+	})
+
+	if ok {
+		idx = int(e.payload.memoIdx)
+		found = true
+	} else {
+		idx = s.Size()
+		s.tbl.Insert(e, h, val.(uint64), int32(idx))
+	}
+	return
+}
+
+type payloadFloat32 struct {
+	val     float32
+	memoIdx int32
+}
+
+type entryFloat32 struct {
+	h       uint64
+	payload payloadFloat32
+}
+
+func (e entryFloat32) Valid() bool { return e.h != sentinel }
+
+// Float32HashTable is a hashtable specifically for float32 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Float32HashTable struct {
+	cap     uint64
+	capMask uint64
+	size    uint64
+
+	entries []entryFloat32
+}
+
+// NewFloat32HashTable returns a new hash table for float32 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewFloat32HashTable(cap uint64) *Float32HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &Float32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entryFloat32, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Float32HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryFloat32, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Float32HashTable) CopyValues(out []float32) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Float32HashTable) CopyValuesSubset(start int, out []float32) {
+	h.VisitEntries(func(e *entryFloat32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Float32HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Float32HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Float32Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryFloat32) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEFloat32(e.payload.val)
+		}
+	})
+}
+
+func (h *Float32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Float32HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
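+//
+// Note (illustrative): callers such as Float32MemoTable.Get normalize NaN to a
+// single canonical bit pattern before hashing and then match by predicate, so
+// every NaN maps to one shared entry:
+//
+//	h := hashFloat32(float32(math.NaN()), 0)
+//	e, ok := tbl.Lookup(h, isNan32Cmp) // tbl is a hypothetical *Float32HashTable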
+func (h *Float32HashTable) Lookup(v uint64, cmp func(float32) bool) (*entryFloat32, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Float32HashTable) lookup(v uint64, szMask uint64, cmp func(float32) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryFloat32
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object;
+		// the goal is that all 64 bits of the unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Float32HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryFloat32, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(float32) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Float32HashTable) Insert(e *entryFloat32, v uint64, val float32, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Float32HashTable) VisitEntries(visit func(*entryFloat32)) {
+	for _, e := range h.entries {
+		if e.Valid() {
+			visit(&e)
+		}
+	}
+}
+
+// Float32MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Float32MemoTable struct {
+	tbl     *Float32HashTable
+	nullIdx int32
+}
+
+// NewFloat32MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewFloat32MemoTable(num int64) *Float32MemoTable {
+	return &Float32MemoTable{tbl: NewFloat32HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Float32MemoTable) TypeTraits() TypeTraits {
+	return arrow.Float32Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Float32MemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Float32MemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Float32MemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Float32MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Float32MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Float32MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]float32)) +} + +func (s *Float32MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Float32Traits.CastFromBytes(out)) +} + +func (s *Float32MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Float32Traits.CastFromBytes(out)) +} + +func (s *Float32MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Float32MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *Float32MemoTable) Get(val interface{}) (int, bool) { + var cmp func(float32) bool + + if math.IsNaN(float64(val.(float32))) { + cmp = isNan32Cmp + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = float32(math.NaN()) + } else { + cmp = func(v float32) bool { return val.(float32) == v } + } + + h := hashFloat32(val.(float32), 0) + if e, ok := s.tbl.Lookup(h, cmp); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Float32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + var cmp func(float32) bool + + if math.IsNaN(float64(val.(float32))) { + cmp = isNan32Cmp + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = float32(math.NaN()) + } else { + cmp = func(v float32) bool { return val.(float32) == v } + } + + h := hashFloat32(val.(float32), 0) + e, ok := s.tbl.Lookup(h, cmp) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(float32), int32(idx)) + } + return +} + +type payloadFloat64 struct { + val float64 + memoIdx int32 +} + +type entryFloat64 struct { + h uint64 + payload payloadFloat64 +} + +func (e entryFloat64) Valid() bool { return e.h != sentinel } + +// Float64HashTable is a hashtable specifically for float64 that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type Float64HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entryFloat64 +} + +// NewFloat64HashTable returns a new hash table for float64 values +// initialized with the passed in capacity or 32 whichever is larger. 
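+//
+// For example (illustrative): NewFloat64HashTable(100) rounds the requested
+// capacity up to the next power of two and allocates 128 entries, while any
+// request below 32 gets the 32-entry minimum.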
+func NewFloat64HashTable(cap uint64) *Float64HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &Float64HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entryFloat64, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Float64HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entryFloat64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Float64HashTable) CopyValues(out []float64) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Float64HashTable) CopyValuesSubset(start int, out []float64) {
+	h.VisitEntries(func(e *entryFloat64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *Float64HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *Float64HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.Float64Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entryFloat64) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			data[idx] = utils.ToLEFloat64(e.payload.val)
+		}
+	})
+}
+
+func (h *Float64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Float64HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Float64HashTable) Lookup(v uint64, cmp func(float64) bool) (*entryFloat64, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *Float64HashTable) lookup(v uint64, szMask uint64, cmp func(float64) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entryFloat64
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object
+		// the goal is that all 64 bits of unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *Float64HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entryFloat64, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func(float64) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Float64HashTable) Insert(e *entryFloat64, v uint64, val float64, memoIdx int32) error { + e.h = h.fixHash(v) + e.payload.val = val + e.payload.memoIdx = memoIdx + h.size++ + + if h.needUpsize() { + h.upsize(h.cap * uint64(loadFactor) * 2) + } + return nil +} + +// VisitEntries will call the passed in function on each *valid* entry in the hash table, +// a valid entry being one which has had a value inserted into it. +func (h *Float64HashTable) VisitEntries(visit func(*entryFloat64)) { + for _, e := range h.entries { + if e.Valid() { + visit(&e) + } + } +} + +// Float64MemoTable is a wrapper over the appropriate hashtable to provide an interface +// conforming to the MemoTable interface defined in the encoding package for general interactions +// regarding dictionaries. +type Float64MemoTable struct { + tbl *Float64HashTable + nullIdx int32 +} + +// NewFloat64MemoTable returns a new memotable with num entries pre-allocated to reduce further +// allocations when inserting. +func NewFloat64MemoTable(num int64) *Float64MemoTable { + return &Float64MemoTable{tbl: NewFloat64HashTable(uint64(num)), nullIdx: KeyNotFound} +} + +func (Float64MemoTable) TypeTraits() TypeTraits { + return arrow.Float64Traits +} + +// Reset allows this table to be re-used by dumping all the data currently in the table. +func (s *Float64MemoTable) Reset() { + s.tbl.Reset(32) + s.nullIdx = KeyNotFound +} + +// Size returns the current number of inserted elements into the table including if a null +// has been inserted. +func (s *Float64MemoTable) Size() int { + sz := int(s.tbl.size) + if _, ok := s.GetNull(); ok { + sz++ + } + return sz +} + +// GetNull returns the index of an inserted null or KeyNotFound along with a bool +// that will be true if found and false if not. +func (s *Float64MemoTable) GetNull() (int, bool) { + return int(s.nullIdx), s.nullIdx != KeyNotFound +} + +// GetOrInsertNull will return the index of the null entry or insert a null entry +// if one currently doesn't exist. The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *Float64MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *Float64MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *Float64MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]float64)) +} + +func (s *Float64MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.Float64Traits.CastFromBytes(out)) +} + +func (s *Float64MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.Float64Traits.CastFromBytes(out)) +} + +func (s *Float64MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *Float64MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. 
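+//
+// NaN keys are normalized to one canonical bit pattern, so every NaN maps to
+// the same entry. A short sketch (values illustrative):
+//
+//	idx, _, _ := tbl.GetOrInsert(math.NaN())
+//	got, ok := tbl.Get(math.Float64frombits(0x7ff8000000000001)) // a different NaN payload
+//	// ok == true && got == idx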
+func (s *Float64MemoTable) Get(val interface{}) (int, bool) { + var cmp func(float64) bool + if math.IsNaN(val.(float64)) { + cmp = math.IsNaN + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = math.NaN() + } else { + cmp = func(v float64) bool { return val.(float64) == v } + } + + h := hashFloat64(val.(float64), 0) + if e, ok := s.tbl.Lookup(h, cmp); ok { + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *Float64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + + var cmp func(float64) bool + if math.IsNaN(val.(float64)) { + cmp = math.IsNaN + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = math.NaN() + } else { + cmp = func(v float64) bool { return val.(float64) == v } + } + + h := hashFloat64(val.(float64), 0) + e, ok := s.tbl.Lookup(h, cmp) + + if ok { + idx = int(e.payload.memoIdx) + found = true + } else { + idx = s.Size() + s.tbl.Insert(e, h, val.(float64), int32(idx)) + } + return +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl new file mode 100644 index 00000000..60665788 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl @@ -0,0 +1,343 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashing + +import ( + "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v12/internal/utils" +) + +{{range .In}} +type payload{{.Name}} struct { + val {{.name}} + memoIdx int32 +} + +type entry{{.Name}} struct { + h uint64 + payload payload{{.Name}} +} + +func (e entry{{.Name}}) Valid() bool { return e.h != sentinel } + +// {{.Name}}HashTable is a hashtable specifically for {{.name}} that +// is utilized with the MemoTable to generalize interactions for easier +// implementation of dictionaries without losing performance. +type {{.Name}}HashTable struct { + cap uint64 + capMask uint64 + size uint64 + + entries []entry{{.Name}} +} + +// New{{.Name}}HashTable returns a new hash table for {{.name}} values +// initialized with the passed in capacity or 32 whichever is larger. 
+func New{{.Name}}HashTable(cap uint64) *{{.Name}}HashTable {
+	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	ret := &{{.Name}}HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+	ret.entries = make([]entry{{.Name}}, initCap)
+	return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *{{.Name}}HashTable) Reset(cap uint64) {
+	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+	h.capMask = h.cap - 1
+	h.size = 0
+	h.entries = make([]entry{{.Name}}, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *{{.Name}}HashTable) CopyValues(out []{{.name}}) {
+	h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *{{.Name}}HashTable) CopyValuesSubset(start int, out []{{.name}}) {
+	h.VisitEntries(func(e *entry{{.Name}}) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+			out[idx] = e.payload.val
+		}
+	})
+}
+
+func (h *{{.Name}}HashTable) WriteOut(out []byte) {
+	h.WriteOutSubset(0, out)
+}
+
+func (h *{{.Name}}HashTable) WriteOutSubset(start int, out []byte) {
+	data := arrow.{{.Name}}Traits.CastFromBytes(out)
+	h.VisitEntries(func(e *entry{{.Name}}) {
+		idx := e.payload.memoIdx - int32(start)
+		if idx >= 0 {
+{{if and (ne .Name "Int8") (ne .Name "Uint8") -}}
+			data[idx] = utils.ToLE{{.Name}}(e.payload.val)
+{{else -}}
+			data[idx] = e.payload.val
+{{end -}}
+		}
+	})
+}
+
+func (h *{{.Name}}HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func ({{.Name}}HashTable) fixHash(v uint64) uint64 {
+	if v == sentinel {
+		return 42
+	}
+	return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *{{.Name}}HashTable) Lookup(v uint64, cmp func({{.name}}) bool) (*entry{{.Name}}, bool) {
+	idx, ok := h.lookup(v, h.capMask, cmp)
+	return &h.entries[idx], ok
+}
+
+func (h *{{.Name}}HashTable) lookup(v uint64, szMask uint64, cmp func({{.name}}) bool) (uint64, bool) {
+	const perturbShift uint8 = 5
+
+	var (
+		idx     uint64
+		perturb uint64
+		e       *entry{{.Name}}
+	)
+
+	v = h.fixHash(v)
+	idx = v & szMask
+	perturb = (v >> uint64(perturbShift)) + 1
+
+	for {
+		e = &h.entries[idx]
+		if e.h == v && cmp(e.payload.val) {
+			return idx, true
+		}
+
+		if e.h == sentinel {
+			return idx, false
+		}
+
+		// perturbation logic inspired by CPython's set/dict object
+		// the goal is that all 64 bits of unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+		idx = (idx + perturb) & szMask
+		perturb = (perturb >> uint64(perturbShift)) + 1
+	}
+}
+
+func (h *{{.Name}}HashTable) upsize(newcap uint64) error {
+	newMask := newcap - 1
+
+	oldEntries := h.entries
+	h.entries = make([]entry{{.Name}}, newcap)
+	for _, e := range oldEntries {
+		if e.Valid() {
+			idx, _ := h.lookup(e.h, newMask, func({{.name}}) bool { return false })
+			h.entries[idx] = e
+		}
+	}
+	h.cap = newcap
+	h.capMask = newMask
+	return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *{{.Name}}HashTable) Insert(e *entry{{.Name}}, v uint64, val {{.name}}, memoIdx int32) error {
+	e.h = h.fixHash(v)
+	e.payload.val = val
+	e.payload.memoIdx = memoIdx
+	h.size++
+
+	if h.needUpsize() {
+		h.upsize(h.cap * uint64(loadFactor) * 2)
+	}
+	return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *{{.Name}}HashTable) VisitEntries(visit func(*entry{{.Name}})) {
+	for _, e := range h.entries {
+		if e.Valid() {
+			visit(&e)
+		}
+	}
+}
+
+// {{.Name}}MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type {{.Name}}MemoTable struct {
+	tbl     *{{.Name}}HashTable
+	nullIdx int32
+}
+
+// New{{.Name}}MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func New{{.Name}}MemoTable(num int64) *{{.Name}}MemoTable {
+	return &{{.Name}}MemoTable{tbl: New{{.Name}}HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func ({{.Name}}MemoTable) TypeTraits() TypeTraits {
+	return arrow.{{.Name}}Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *{{.Name}}MemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *{{.Name}}MemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *{{.Name}}MemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. 
The found value will be true if there was already +// a null in the table, and false if it inserted one. +func (s *{{.Name}}MemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = s.GetNull() + if !found { + idx = s.Size() + s.nullIdx = int32(idx) + } + return +} + +// CopyValues will copy the values from the memo table out into the passed in slice +// which must be of the appropriate type. +func (s *{{.Name}}MemoTable) CopyValues(out interface{}) { + s.CopyValuesSubset(0, out) +} + +// CopyValuesSubset is like CopyValues but only copies a subset of values starting +// at the provided start index +func (s *{{.Name}}MemoTable) CopyValuesSubset(start int, out interface{}) { + s.tbl.CopyValuesSubset(start, out.([]{{.name}})) +} + +func (s *{{.Name}}MemoTable) WriteOut(out []byte) { + s.tbl.CopyValues(arrow.{{.Name}}Traits.CastFromBytes(out)) +} + +func (s *{{.Name}}MemoTable) WriteOutSubset(start int, out []byte) { + s.tbl.CopyValuesSubset(start, arrow.{{.Name}}Traits.CastFromBytes(out)) +} + +func (s *{{.Name}}MemoTable) WriteOutLE(out []byte) { + s.tbl.WriteOut(out) +} + +func (s *{{.Name}}MemoTable) WriteOutSubsetLE(start int, out []byte) { + s.tbl.WriteOutSubset(start, out) +} + +// Get returns the index of the requested value in the hash table or KeyNotFound +// along with a boolean indicating if it was found or not. +func (s *{{.Name}}MemoTable) Get(val interface{}) (int, bool) { +{{if and (ne .Name "Float32") (ne .Name "Float64") }} + h := hashInt(uint64(val.({{.name}})), 0) + if e, ok := s.tbl.Lookup(h, func(v {{.name}}) bool { return val.({{.name}}) == v }); ok { +{{ else -}} + var cmp func({{.name}}) bool + {{if eq .Name "Float32"}} + if math.IsNaN(float64(val.(float32))) { + cmp = isNan32Cmp + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = float32(math.NaN()) + {{ else -}} + if math.IsNaN(val.(float64)) { + cmp = math.IsNaN + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = math.NaN() + {{end -}} + } else { + cmp = func(v {{.name}}) bool { return val.({{.name}}) == v } + } + + h := hash{{.Name}}(val.({{.name}}), 0) + if e, ok := s.tbl.Lookup(h, cmp); ok { +{{ end -}} + return int(e.payload.memoIdx), ok + } + return KeyNotFound, false +} + +// GetOrInsert will return the index of the specified value in the table, or insert the +// value into the table and return the new index. found indicates whether or not it already +// existed in the table (true) or was inserted by this call (false). +func (s *{{.Name}}MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + {{if and (ne .Name "Float32") (ne .Name "Float64") }} + h := hashInt(uint64(val.({{.name}})), 0) + e, ok := s.tbl.Lookup(h, func(v {{.name}}) bool { + return val.({{.name}}) == v + }) +{{ else }} + var cmp func({{.name}}) bool + {{if eq .Name "Float32"}} + if math.IsNaN(float64(val.(float32))) { + cmp = isNan32Cmp + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. NaN is NaN is NaN + val = float32(math.NaN()) + {{ else -}} + if math.IsNaN(val.(float64)) { + cmp = math.IsNaN + // use consistent internal bit pattern for NaN regardless of the pattern + // that is passed to us. 
NaN is NaN is NaN
+	val = math.NaN()
+	{{end -}}
+	} else {
+		cmp = func(v {{.name}}) bool { return val.({{.name}}) == v }
+	}
+
+	h := hash{{.Name}}(val.({{.name}}), 0)
+	e, ok := s.tbl.Lookup(h, cmp)
+{{ end }}
+	if ok {
+		idx = int(e.payload.memoIdx)
+		found = true
+	} else {
+		idx = s.Size()
+		s.tbl.Insert(e, h, val.({{.name}}), int32(idx))
+	}
+	return
+}
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go
new file mode 100644
index 00000000..c67b56bc
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go
@@ -0,0 +1,421 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package hashing provides utilities for and an implementation of a hash
+// table which is more performant than the default go map implementation
+// by leveraging xxh3 and some custom hash functions.
+package hashing
+
+import (
+	"bytes"
+	"math"
+	"reflect"
+	"unsafe"
+
+	"github.com/apache/arrow/go/v12/parquet"
+)
+
+//go:generate go run ../../arrow/_tools/tmpl/main.go -i -data=types.tmpldata xxh3_memo_table.gen.go.tmpl
+
+type TypeTraits interface {
+	BytesRequired(n int) int
+}
+
+// MemoTable interface for hash tables and dictionary encoding.
+//
+// Values will remember the order they are inserted to generate a valid
+// dictionary.
+type MemoTable interface {
+	TypeTraits() TypeTraits
+	// Reset drops everything in the table allowing it to be reused
+	Reset()
+	// Size returns the current number of unique values stored in
+	// the table, including whether or not a null value has been
+	// inserted via GetOrInsertNull.
+	Size() int
+	// GetOrInsert returns the index in the table of the specified value,
+	// and a boolean indicating whether or not the value was found in
+	// the table (if false, the value was inserted). An error is returned
+	// if val is not the appropriate type for the table.
+	GetOrInsert(val interface{}) (idx int, existed bool, err error)
+	// GetOrInsertNull returns the index of the null value in the table,
+	// inserting one if it hasn't already been inserted. It returns a boolean
+	// indicating if the null value already existed or not in the table.
+	GetOrInsertNull() (idx int, existed bool)
+	// GetNull returns the index of the null value in the table, but does not
+	// insert one if it doesn't already exist. It will return -1 and false
+	// if it doesn't exist.
+	GetNull() (idx int, exists bool)
+	// WriteOut copies the unique values of the memotable out to the byte slice
+	// provided. Must have allocated enough bytes for all the values.
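+	// For the fixed-width numeric tables this typically means
+	// TypeTraits().BytesRequired(Size()) bytes (an illustrative sizing note,
+	// not an additional API guarantee).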
+	WriteOut(out []byte)
+	// WriteOutSubset is like WriteOut, but only writes a subset of values
+	// starting with the index offset.
+	WriteOutSubset(offset int, out []byte)
+}
+
+type NumericMemoTable interface {
+	MemoTable
+	WriteOutLE(out []byte)
+	WriteOutSubsetLE(offset int, out []byte)
+}
+
+const (
+	sentinel   uint64 = 0
+	loadFactor int64  = 2
+)
+
+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+var isNan32Cmp = func(v float32) bool { return math.IsNaN(float64(v)) }
+
+// KeyNotFound is the constant returned by memo table functions when a key isn't found in the table
+const KeyNotFound = -1
+
+type BinaryBuilderIFace interface {
+	Reserve(int)
+	ReserveData(int)
+	Retain()
+	Resize(int)
+	ResizeData(int)
+	Release()
+	DataLen() int
+	Value(int) []byte
+	Len() int
+	AppendNull()
+	AppendString(string)
+	Append([]byte)
+}
+
+// BinaryMemoTable is our hashtable for binary data, using a BinaryBuilder
+// to construct the actual data with minimal copies while a hash table keeps
+// track of the indexes into the dictionary that is created as we go.
+type BinaryMemoTable struct {
+	tbl     *Int32HashTable
+	builder BinaryBuilderIFace
+	nullIdx int
+}
+
+// NewBinaryMemoTable returns a hash table for binary data backed by the passed in
+// builder, which is used to accumulate the actual dictionary values.
+// initial and valuesize can be used to pre-allocate the table to reduce allocations, with
+// initial being the initial number of entries to allocate for and valuesize being the starting
+// amount of space allocated for writing the actual binary data.
+func NewBinaryMemoTable(initial, valuesize int, bldr BinaryBuilderIFace) *BinaryMemoTable {
+	bldr.Reserve(int(initial))
+	datasize := valuesize
+	if datasize <= 0 {
+		datasize = initial * 4
+	}
+	bldr.ReserveData(datasize)
+	return &BinaryMemoTable{tbl: NewInt32HashTable(uint64(initial)), builder: bldr, nullIdx: KeyNotFound}
+}
+
+type unimplementedtraits struct{}
+
+func (unimplementedtraits) BytesRequired(int) int { panic("unimplemented") }
+
+func (BinaryMemoTable) TypeTraits() TypeTraits {
+	return unimplementedtraits{}
+}
+
+// Reset dumps all of the data in the table allowing it to be reused.
+func (s *BinaryMemoTable) Reset() {
+	s.tbl.Reset(32)
+	s.builder.Resize(0)
+	s.builder.ResizeData(0)
+	s.builder.Reserve(int(32))
+	s.builder.ReserveData(int(32) * 4)
+	s.nullIdx = KeyNotFound
+}
+
+// GetNull returns the index of a null that has been inserted into the table or
+// KeyNotFound. The bool returned will be true if there was a null inserted into
+// the table, and false otherwise.
+func (s *BinaryMemoTable) GetNull() (int, bool) {
+	return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// Size returns the current size of the memo table including the null value
+// if one has been inserted.
+func (s *BinaryMemoTable) Size() int {
+	sz := int(s.tbl.size)
+	if _, ok := s.GetNull(); ok {
+		sz++
+	}
+	return sz
+}
+
+// helper function to easily return a byte slice for any given value
+// regardless of the type if it's a []byte, parquet.ByteArray,
+// parquet.FixedLenByteArray or string.
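+// The string case is zero-copy: the returned slice aliases the string's
+// backing memory, so callers must treat it as read-only.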
+func (BinaryMemoTable) valAsByteSlice(val interface{}) []byte { + switch v := val.(type) { + case []byte: + return v + case parquet.ByteArray: + return *(*[]byte)(unsafe.Pointer(&v)) + case parquet.FixedLenByteArray: + return *(*[]byte)(unsafe.Pointer(&v)) + case string: + var out []byte + h := (*reflect.StringHeader)(unsafe.Pointer(&v)) + s := (*reflect.SliceHeader)(unsafe.Pointer(&out)) + s.Data = h.Data + s.Len = h.Len + s.Cap = h.Len + return out + default: + panic("invalid type for binarymemotable") + } +} + +// helper function to get the hash value regardless of the underlying binary type +func (BinaryMemoTable) getHash(val interface{}) uint64 { + switch v := val.(type) { + case string: + return hashString(v, 0) + case []byte: + return hash(v, 0) + case parquet.ByteArray: + return hash(*(*[]byte)(unsafe.Pointer(&v)), 0) + case parquet.FixedLenByteArray: + return hash(*(*[]byte)(unsafe.Pointer(&v)), 0) + default: + panic("invalid type for binarymemotable") + } +} + +// helper function to append the given value to the builder regardless +// of the underlying binary type. +func (b *BinaryMemoTable) appendVal(val interface{}) { + switch v := val.(type) { + case string: + b.builder.AppendString(v) + case []byte: + b.builder.Append(v) + case parquet.ByteArray: + b.builder.Append(*(*[]byte)(unsafe.Pointer(&v))) + case parquet.FixedLenByteArray: + b.builder.Append(*(*[]byte)(unsafe.Pointer(&v))) + } +} + +func (b *BinaryMemoTable) lookup(h uint64, val []byte) (*entryInt32, bool) { + return b.tbl.Lookup(h, func(i int32) bool { + return bytes.Equal(val, b.builder.Value(int(i))) + }) +} + +// Get returns the index of the specified value in the table or KeyNotFound, +// and a boolean indicating whether it was found in the table. +func (b *BinaryMemoTable) Get(val interface{}) (int, bool) { + if p, ok := b.lookup(b.getHash(val), b.valAsByteSlice(val)); ok { + return int(p.payload.val), ok + } + return KeyNotFound, false +} + +// GetOrInsert returns the index of the given value in the table, if not found +// it is inserted into the table. The return value 'found' indicates whether the value +// was found in the table (true) or inserted (false) along with any possible error. +func (b *BinaryMemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) { + h := b.getHash(val) + p, found := b.lookup(h, b.valAsByteSlice(val)) + if found { + idx = int(p.payload.val) + } else { + idx = b.Size() + b.appendVal(val) + b.tbl.Insert(p, h, int32(idx), -1) + } + return +} + +// GetOrInsertNull retrieves the index of a null in the table or inserts +// null into the table, returning the index and a boolean indicating if it was +// found in the table (true) or was inserted (false). +func (b *BinaryMemoTable) GetOrInsertNull() (idx int, found bool) { + idx, found = b.GetNull() + if !found { + idx = b.Size() + b.nullIdx = idx + b.builder.AppendNull() + } + return +} + +// helper function to get the offset into the builder data for a given +// index value. 
+func (b *BinaryMemoTable) findOffset(idx int) uintptr {
+	if b.builder.DataLen() == 0 {
+		// only empty strings, short circuit
+		return 0
+	}
+
+	val := b.builder.Value(idx)
+	for len(val) == 0 {
+		idx++
+		if idx >= b.builder.Len() {
+			break
+		}
+		val = b.builder.Value(idx)
+	}
+	if len(val) != 0 {
+		return uintptr(unsafe.Pointer(&val[0]))
+	}
+	return uintptr(b.builder.DataLen()) + b.findOffset(0)
+}
+
+// CopyOffsets copies the list of offsets into the passed in slice, the offsets
+// being the start and end values of the underlying allocated bytes in the builder
+// for the individual values of the table. out should be at least sized to Size()+1
+func (b *BinaryMemoTable) CopyOffsets(out []int32) {
+	b.CopyOffsetsSubset(0, out)
+}
+
+// CopyOffsetsSubset is like CopyOffsets but instead of copying all of the offsets,
+// it gets a subset of the offsets in the table starting at the index provided by "start".
+func (b *BinaryMemoTable) CopyOffsetsSubset(start int, out []int32) {
+	if b.builder.Len() <= start {
+		return
+	}
+
+	first := b.findOffset(0)
+	delta := b.findOffset(start)
+	sz := b.Size()
+	for i := start; i < sz; i++ {
+		offset := int32(b.findOffset(i) - delta)
+		out[i-start] = offset
+	}
+
+	out[sz-start] = int32(b.builder.DataLen() - (int(delta) - int(first)))
+}
+
+// CopyLargeOffsets copies the list of offsets into the passed in slice, the offsets
+// being the start and end values of the underlying allocated bytes in the builder
+// for the individual values of the table. out should be at least sized to Size()+1
+func (b *BinaryMemoTable) CopyLargeOffsets(out []int64) {
+	b.CopyLargeOffsetsSubset(0, out)
+}
+
+// CopyLargeOffsetsSubset is like CopyOffsets but instead of copying all of the offsets,
+// it gets a subset of the offsets in the table starting at the index provided by "start".
+func (b *BinaryMemoTable) CopyLargeOffsetsSubset(start int, out []int64) {
+	if b.builder.Len() <= start {
+		return
+	}
+
+	first := b.findOffset(0)
+	delta := b.findOffset(start)
+	sz := b.Size()
+	for i := start; i < sz; i++ {
+		offset := int64(b.findOffset(i) - delta)
+		out[i-start] = offset
+	}
+
+	out[sz-start] = int64(b.builder.DataLen() - (int(delta) - int(first)))
+}
+
+// CopyValues copies the raw binary data bytes out; out should be a []byte
+// with at least ValuesSize bytes allocated to copy into.
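+//
+// A minimal usage sketch:
+//
+//	out := make([]byte, tbl.ValuesSize())
+//	tbl.CopyValues(out)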
+func (b *BinaryMemoTable) CopyValues(out interface{}) {
+	b.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies the raw binary data bytes out starting with the value
+// at the index start; out should be a []byte with at least ValuesSize bytes allocated
+func (b *BinaryMemoTable) CopyValuesSubset(start int, out interface{}) {
+	if b.builder.Len() <= start {
+		return
+	}
+
+	var (
+		first  = b.findOffset(0)
+		offset = b.findOffset(int(start))
+		length = b.builder.DataLen() - int(offset-first)
+	)
+
+	outval := out.([]byte)
+	copy(outval, b.builder.Value(start)[0:length])
+}
+
+func (b *BinaryMemoTable) WriteOut(out []byte) {
+	b.CopyValues(out)
+}
+
+func (b *BinaryMemoTable) WriteOutSubset(start int, out []byte) {
+	b.CopyValuesSubset(start, out)
+}
+
+// CopyFixedWidthValues exists to cope with the fact that the table doesn't keep
+// track of the fixed width when inserting the null value: the data buffer holds a
+// zero length byte slice for the null value (if found).
+func (b *BinaryMemoTable) CopyFixedWidthValues(start, width int, out []byte) {
+	if start >= b.Size() {
+		return
+	}
+
+	null, exists := b.GetNull()
+	if !exists || null < start {
+		// nothing to skip, proceed as usual
+		b.CopyValuesSubset(start, out)
+		return
+	}
+
+	var (
+		leftOffset  = b.findOffset(start)
+		nullOffset  = b.findOffset(null)
+		leftSize    = nullOffset - leftOffset
+		rightOffset = leftOffset + uintptr(b.ValuesSize())
+	)
+
+	if leftSize > 0 {
+		copy(out, b.builder.Value(start)[0:leftSize])
+	}
+
+	rightSize := rightOffset - nullOffset
+	if rightSize > 0 {
+		// skip the null fixed size value
+		copy(out[int(leftSize)+width:], b.builder.Value(null + 1)[0:rightSize])
+	}
+}
+
+// VisitValues exists to run the visitFn on each value currently in the hash table.
+func (b *BinaryMemoTable) VisitValues(start int, visitFn func([]byte)) {
+	for i := int(start); i < b.Size(); i++ {
+		visitFn(b.builder.Value(i))
+	}
+}
+
+// Release is used to tell the underlying builder that it can release the memory allocated
+// when the reference count reaches 0; it is safe to call from multiple goroutines
+// simultaneously.
+func (b *BinaryMemoTable) Release() { b.builder.Release() }
+
+// Retain increases the ref count; it is safe to call it from multiple goroutines
+// simultaneously.
+func (b *BinaryMemoTable) Retain() { b.builder.Retain() }
+
+// ValuesSize returns the current total size of all the raw bytes that have been inserted
+// into the memotable so far.
+func (b *BinaryMemoTable) ValuesSize() int { return b.builder.DataLen() }
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/Makefile b/vendor/github.com/apache/arrow/go/v12/internal/utils/Makefile
new file mode 100644
index 00000000..fded9d1d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/Makefile
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility
+PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/'
+
+C2GOASM=c2goasm
+CC=clang-11
+C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 \
+	-fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib
+ASM_FLAGS_AVX2=-mavx2 -mfma
+ASM_FLAGS_SSE4=-msse4
+ASM_FLAGS_BMI2=-mbmi2
+ASM_FLAGS_POPCNT=-mpopcnt
+
+C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \
+	-fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+
+.PHONY: assembly
+
+INTEL_SOURCES := \
+	min_max_avx2_amd64.s min_max_sse4_amd64.s transpose_ints_avx2_amd64.s transpose_ints_sse4_amd64.s
+
+#
+# ARROW-15336: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm added the Arm64 support.
+# min_max_neon_arm64.s was generated by asm2plan9s.
+# And manually formatted it as the Arm64 Plan9.
+#
+
+assembly: $(INTEL_SOURCES)
+
+_lib/min_max_avx2_amd64.s: _lib/min_max.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/min_max_sse4_amd64.s: _lib/min_max.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/min_max_neon.s: _lib/min_max.c
+	$(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_avx2_amd64.s: _lib/transpose_ints.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_sse4_amd64.s: _lib/transpose_ints.c
+	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_neon.s: _lib/transpose_ints.c
+	$(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+min_max_avx2_amd64.s: _lib/min_max_avx2_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+min_max_sse4_amd64.s: _lib/min_max_sse4_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+transpose_ints_avx2_amd64.s: _lib/transpose_ints_avx2_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+transpose_ints_sse4_amd64.s: _lib/transpose_ints_sse4_amd64.s
+	$(C2GOASM) -a -f $^ $@
+
+clean:
+	rm -f $(INTEL_SOURCES)
+	rm -f $(addprefix _lib/,$(INTEL_SOURCES))
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/buf_reader.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/buf_reader.go
new file mode 100644
index 00000000..0b2381da
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/buf_reader.go
@@ -0,0 +1,212 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bufio" + "errors" + "fmt" + "io" +) + +// bufferedReader is similar to bufio.Reader except +// it will expand the buffer if necessary when asked to Peek +// more bytes than are in the buffer +type bufferedReader struct { + bufferSz int + buf []byte + r, w int + rd io.Reader + err error +} + +// NewBufferedReader returns a buffered reader with similar semantics to bufio.Reader +// except Peek will expand the internal buffer if needed rather than return +// an error. +func NewBufferedReader(rd io.Reader, sz int) *bufferedReader { + // if rd is already a buffered reader whose buffer is >= the requested size + // then just return it as is. no need to make a new object. + b, ok := rd.(*bufferedReader) + if ok && len(b.buf) >= sz { + return b + } + + r := &bufferedReader{ + rd: rd, + } + r.resizeBuffer(sz) + return r +} + +func (b *bufferedReader) resetBuffer() { + if b.buf == nil { + b.buf = make([]byte, b.bufferSz) + } else if b.bufferSz > cap(b.buf) { + buf := b.buf + b.buf = make([]byte, b.bufferSz) + copy(b.buf, buf) + } else { + b.buf = b.buf[:b.bufferSz] + } +} + +func (b *bufferedReader) resizeBuffer(newSize int) { + b.bufferSz = newSize + b.resetBuffer() +} + +func (b *bufferedReader) fill() error { + // slide existing data to the beginning + if b.r > 0 { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + if b.w >= len(b.buf) { + return fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrBufferFull) + } + + n, err := io.ReadAtLeast(b.rd, b.buf[b.w:], 1) + if n < 0 { + return fmt.Errorf("arrow/bufferedreader: filling buffer: %w", bufio.ErrNegativeCount) + } + + b.w += n + b.err = err + return nil +} + +func (b *bufferedReader) readErr() error { + err := b.err + b.err = nil + return err +} + +// Buffered returns the number of bytes currently buffered +func (b *bufferedReader) Buffered() int { return b.w - b.r } + +// SetBufferSize resets the size of the internal buffer to the desired size. +// Will return an error if newSize is <= 0 or if newSize is less than the size +// of the buffered data. +func (b *bufferedReader) SetBufferSize(newSize int) error { + if newSize <= 0 { + return errors.New("buffer size should be positive") + } + + if b.w >= newSize { + return errors.New("cannot shrink read buffer if buffered data remains") + } + + b.resizeBuffer(newSize) + return nil +} + +// Peek will buffer and return n bytes from the underlying reader without advancing +// the reader itself. If n is larger than the current buffer size, the buffer will +// be expanded to accommodate the extra bytes rather than error. +func (b *bufferedReader) Peek(n int) ([]byte, error) { + if n < 0 { + return nil, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount) + } + + if n > len(b.buf) { + if err := b.SetBufferSize(n); err != nil { + return nil, err + } + } + + for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil { + b.fill() // b.w-b.r < len(b.buf) => buffer is not full + } + + return b.buf[b.r : b.r+n], b.readErr() +} + +// Discard skips the next n bytes either by advancing the internal buffer +// or by reading that many bytes in and throwing them away. 
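+//
+// A short usage sketch (the reader r and the 8-byte header are illustrative):
+//
+//	hdr, _ := r.Peek(8)    // inspect without consuming; grows the buffer if needed
+//	// ... decide based on hdr ...
+//	n, err := r.Discard(8) // then actually consume the header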
+func (b *bufferedReader) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount) + } + + if n == 0 { + return + } + + remain := n + for { + skip := b.Buffered() + if skip == 0 { + b.fill() + skip = b.Buffered() + } + if skip > remain { + skip = remain + } + b.r += skip + remain -= skip + if remain == 0 { + return n, nil + } + if b.err != nil { + return n - remain, b.readErr() + } + } +} + +func (b *bufferedReader) Read(p []byte) (n int, err error) { + n = len(p) + if n == 0 { + if b.Buffered() > 0 { + return 0, nil + } + return 0, b.readErr() + } + + if b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + if len(p) >= len(b.buf) { + // large read, empty buffer + // read directly into p to avoid extra copy + n, b.err = b.rd.Read(p) + if n < 0 { + return n, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount) + } + return n, b.readErr() + } + + // one read + // don't use b.fill + b.r, b.w = 0, 0 + n, b.err = b.rd.Read(b.buf) + if n < 0 { + return n, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount) + } + if n == 0 { + return 0, b.readErr() + } + b.w += n + } + + // copy as much as we can + n = copy(p, b.buf[b.r:b.w]) + b.r += n + return n, nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_default.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_default.go new file mode 100644 index 00000000..5fd257f5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_default.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !s390x + +package utils + +var ( + ToLEInt16 = func(x int16) int16 { return x } + ToLEUint16 = func(x uint16) uint16 { return x } + ToLEUint32 = func(x uint32) uint32 { return x } + ToLEUint64 = func(x uint64) uint64 { return x } + ToLEInt32 = func(x int32) int32 { return x } + ToLEInt64 = func(x int64) int64 { return x } + ToLEFloat32 = func(x float32) float32 { return x } + ToLEFloat64 = func(x float64) float64 { return x } +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_s390x.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_s390x.go new file mode 100644 index 00000000..7bb27cd8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_s390x.go @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "math" + "math/bits" +) + +var ( + ToLEInt16 = func(x int16) int16 { return int16(bits.ReverseBytes16(uint16(x))) } + ToLEUint16 = bits.ReverseBytes16 + ToLEUint32 = bits.ReverseBytes32 + ToLEUint64 = bits.ReverseBytes64 + ToLEInt32 = func(x int32) int32 { return int32(bits.ReverseBytes32(uint32(x))) } + ToLEInt64 = func(x int64) int64 { return int64(bits.ReverseBytes64(uint64(x))) } + ToLEFloat32 = func(x float32) float32 { return math.Float32frombits(bits.ReverseBytes32(math.Float32bits(x))) } + ToLEFloat64 = func(x float64) float64 { return math.Float64frombits(bits.ReverseBytes64(math.Float64bits(x))) } +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/math.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/math.go new file mode 100644 index 00000000..62cf96ce --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/math.go @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +// Min is a convenience Min function for int64 +func Min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// MinInt is a convenience Min function for int +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} + +// Max is a convenience Max function for int64 +func Max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +// MaxInt is a convenience Max function for int +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max.go new file mode 100644 index 00000000..3d7b0024 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max.go @@ -0,0 +1,212 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"math"
+)
+
+// this file contains pure go implementations of the min_max functions that are
+// SIMD accelerated so that we can fall back to these if the cpu doesn't support
+// AVX2 or SSE4 instructions.
+
+func int8MinMax(values []int8) (min, max int8) {
+	min = math.MaxInt8
+	max = math.MinInt8
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func uint8MinMax(values []uint8) (min, max uint8) {
+	min = math.MaxUint8
+	max = 0
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func int16MinMax(values []int16) (min, max int16) {
+	min = math.MaxInt16
+	max = math.MinInt16
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func uint16MinMax(values []uint16) (min, max uint16) {
+	min = math.MaxUint16
+	max = 0
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func int32MinMax(values []int32) (min, max int32) {
+	min = math.MaxInt32
+	max = math.MinInt32
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func uint32MinMax(values []uint32) (min, max uint32) {
+	min = math.MaxUint32
+	max = 0
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func int64MinMax(values []int64) (min, max int64) {
+	min = math.MaxInt64
+	max = math.MinInt64
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+func uint64MinMax(values []uint64) (min, max uint64) {
+	min = math.MaxUint64
+	max = 0
+
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+		if max < v {
+			max = v
+		}
+	}
+	return
+}
+
+var minmaxFuncs = struct {
+	i8   func([]int8) (int8, int8)
+	ui8  func([]uint8) (uint8, uint8)
+	i16  func([]int16) (int16, int16)
+	ui16 func([]uint16) (uint16, uint16)
+	i32  func([]int32) (int32, int32)
+	ui32 func([]uint32) (uint32, uint32)
+	i64  func([]int64) (int64, int64)
+	ui64 func([]uint64) (uint64, uint64)
+}{}
+
+// GetMinMaxInt8 returns the min and max for an int8 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt8(v []int8) (min, max int8) {
+	return minmaxFuncs.i8(v)
+}
+
+// GetMinMaxUint8 returns the min and max for a uint8 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint8(v []uint8) (min, max uint8) {
+	return minmaxFuncs.ui8(v)
+}
+
+// GetMinMaxInt16 returns the min and max for an int16 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
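+//
+// Usage is identical across the whole GetMinMax* family (sketch, with a
+// hypothetical samples slice):
+//
+//	lo, hi := GetMinMaxInt16(samples)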
+func GetMinMaxInt16(v []int16) (min, max int16) {
+	return minmaxFuncs.i16(v)
+}
+
+// GetMinMaxUint16 returns the min and max for a uint16 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint16(v []uint16) (min, max uint16) {
+	return minmaxFuncs.ui16(v)
+}
+
+// GetMinMaxInt32 returns the min and max for an int32 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt32(v []int32) (min, max int32) {
+	return minmaxFuncs.i32(v)
+}
+
+// GetMinMaxUint32 returns the min and max for a uint32 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint32(v []uint32) (min, max uint32) {
+	return minmaxFuncs.ui32(v)
+}
+
+// GetMinMaxInt64 returns the min and max for an int64 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt64(v []int64) (min, max int64) {
+	return minmaxFuncs.i64(v)
+}
+
+// GetMinMaxUint64 returns the min and max for a uint64 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint64(v []uint64) (min, max uint64) {
+	return minmaxFuncs.ui64(v)
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_amd64.go
new file mode 100644
index 00000000..5fccddbe
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_amd64.go
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import "golang.org/x/sys/cpu"
+
+func init() {
+	// if the CPU supports AVX2 or SSE4 then let's use those to benefit from SIMD
+	// to accelerate the performance for finding the min and max for an integral slice.
+	// otherwise fall back to a pure go implementation if the cpu doesn't have these features.
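+	// Note: the chosen implementations are bound to minmaxFuncs once here at
+	// init time, so the exported Get* wrappers avoid per-call CPU feature checks.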
+ if cpu.X86.HasAVX2 { + minmaxFuncs.i8 = int8MaxMinAVX2 + minmaxFuncs.ui8 = uint8MaxMinAVX2 + minmaxFuncs.i16 = int16MaxMinAVX2 + minmaxFuncs.ui16 = uint16MaxMinAVX2 + minmaxFuncs.i32 = int32MaxMinAVX2 + minmaxFuncs.ui32 = uint32MaxMinAVX2 + minmaxFuncs.i64 = int64MaxMinAVX2 + minmaxFuncs.ui64 = uint64MaxMinAVX2 + } else if cpu.X86.HasSSE42 { + minmaxFuncs.i8 = int8MaxMinSSE4 + minmaxFuncs.ui8 = uint8MaxMinSSE4 + minmaxFuncs.i16 = int16MaxMinSSE4 + minmaxFuncs.ui16 = uint16MaxMinSSE4 + minmaxFuncs.i32 = int32MaxMinSSE4 + minmaxFuncs.ui32 = uint32MaxMinSSE4 + minmaxFuncs.i64 = int64MaxMinSSE4 + minmaxFuncs.ui64 = uint64MaxMinSSE4 + } else { + minmaxFuncs.i8 = int8MinMax + minmaxFuncs.ui8 = uint8MinMax + minmaxFuncs.i16 = int16MinMax + minmaxFuncs.ui16 = uint16MinMax + minmaxFuncs.i32 = int32MinMax + minmaxFuncs.ui32 = uint32MinMax + minmaxFuncs.i64 = int64MinMax + minmaxFuncs.ui64 = uint64MinMax + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_arm64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_arm64.go new file mode 100644 index 00000000..7404e95d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_arm64.go @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +import ( + "os" + "strings" +) +import "golang.org/x/sys/cpu" + +func init() { + // Added ability to enable extension via environment: + // ARM_ENABLE_EXT=NEON go test + if ext, ok := os.LookupEnv("ARM_ENABLE_EXT"); ok { + exts := strings.Split(ext, ",") + + for _, x := range exts { + switch x { + case "NEON": + cpu.ARM64.HasASIMD = true + case "AES": + cpu.ARM64.HasAES = true + case "PMULL": + cpu.ARM64.HasPMULL = true + default: + cpu.ARM64.HasASIMD = false + cpu.ARM64.HasAES = false + cpu.ARM64.HasPMULL = false + } + } + } + if cpu.ARM64.HasASIMD { + minmaxFuncs.i32 = int32MaxMinNEON + minmaxFuncs.ui32 = uint32MaxMinNEON + minmaxFuncs.i64 = int64MaxMinNEON + minmaxFuncs.ui64 = uint64MaxMinNEON + } else { + minmaxFuncs.i32 = int32MinMax + minmaxFuncs.ui32 = uint32MinMax + minmaxFuncs.i64 = int64MinMax + minmaxFuncs.ui64 = uint64MinMax + } + + // haven't yet generated the NEON arm64 for these + minmaxFuncs.i8 = int8MinMax + minmaxFuncs.ui8 = uint8MinMax + minmaxFuncs.i16 = int16MinMax + minmaxFuncs.ui16 = uint16MinMax +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.go new file mode 100644 index 00000000..af672624 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.go @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
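min_max_arm64.go above also reads a comma-separated `ARM_ENABLE_EXT` environment variable so tests can force feature flags, e.g. `ARM_ENABLE_EXT=NEON go test`. Note that in the vendored switch an unrecognized token falls through to the default case and clears all three flags, so a typo in the list silently disables NEON as well. Below is a sketch of a stricter take on that parsing; the names are illustrative, not the vendored API.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseExts mirrors the ARM_ENABLE_EXT handling above, but ignores
// unknown tokens instead of clearing the flags already set.
func parseExts() map[string]bool {
	enabled := map[string]bool{}
	v, ok := os.LookupEnv("ARM_ENABLE_EXT")
	if !ok {
		return enabled
	}
	for _, x := range strings.Split(v, ",") {
		switch name := strings.TrimSpace(x); name {
		case "NEON", "AES", "PMULL":
			enabled[name] = true
		}
	}
	return enabled
}

func main() {
	// Try: ARM_ENABLE_EXT=NEON,AES go run .
	fmt.Println(parseExts())
}
```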
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +import ( + "unsafe" +) + +// This file contains convenience functions for utilizing AVX2 intrinsics to quickly +// and efficiently get the min and max from an integral slice. + +//go:noescape +func _int8_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int8MaxMinAVX2(values []int8) (min, max int8) { + _int8_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint8_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint8MaxMinAVX2(values []uint8) (min, max uint8) { + _uint8_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int16_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int16MaxMinAVX2(values []int16) (min, max int16) { + _int16_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint16_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint16MaxMinAVX2(values []uint16) (min, max uint16) { + _uint16_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int32_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int32MaxMinAVX2(values []int32) (min, max int32) { + _int32_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint32_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint32MaxMinAVX2(values []uint32) (min, max uint32) { + _uint32_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int64MaxMinAVX2(values []int64) (min, max int64) { + _int64_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint64MaxMinAVX2(values []uint64) (min, max uint64) { + _uint64_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.s new file mode 100644 index 00000000..fe0c36e0 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.s @@ -0,0 +1,927 @@ +//+build 
!noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +DATA LCDATA1<>+0x000(SB)/8, $0x8080808080808080 +DATA LCDATA1<>+0x008(SB)/8, $0x8080808080808080 +DATA LCDATA1<>+0x010(SB)/8, $0x8080808080808080 +DATA LCDATA1<>+0x018(SB)/8, $0x8080808080808080 +DATA LCDATA1<>+0x020(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x028(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x030(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x038(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x040(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x048(SB)/8, $0x7f7f7f7f7f7f7f7f +DATA LCDATA1<>+0x050(SB)/8, $0x8080808080808080 +DATA LCDATA1<>+0x058(SB)/8, $0x8080808080808080 +GLOBL LCDATA1<>(SB), 8, $96 + +TEXT ยท_int8_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + LEAQ LCDATA1<>(SB), BP + + WORD $0xf685 // test esi, esi + JLE LBB0_1 + WORD $0x8941; BYTE $0xf1 // mov r9d, esi + WORD $0xfe83; BYTE $0x3f // cmp esi, 63 + JA LBB0_4 + WORD $0xb041; BYTE $0x80 // mov r8b, -128 + WORD $0xb640; BYTE $0x7f // mov sil, 127 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB0_11 + +LBB0_1: + WORD $0xb640; BYTE $0x7f // mov sil, 127 + WORD $0xb041; BYTE $0x80 // mov r8b, -128 + JMP LBB0_12 + +LBB0_4: + WORD $0x8945; BYTE $0xca // mov r10d, r9d + LONG $0xc0e28341 // and r10d, -64 + LONG $0xc0428d49 // lea rax, [r10 - 64] + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x06e8c149 // shr r8, 6 + LONG $0x01c08349 // add r8, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB0_5 + WORD $0x894c; BYTE $0xc6 // mov rsi, r8 + LONG $0xfee68348 // and rsi, -2 + WORD $0xf748; BYTE $0xde // neg rsi + LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI0_0] */ + LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI0_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1 + +LBB0_7: + LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax] + LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32] + LONG $0x746ffec5; WORD $0x4007 // vmovdqu ymm6, yword [rdi + rax + 64] + LONG $0x7c6ffec5; WORD $0x6007 // vmovdqu ymm7, yword [rdi + rax + 96] + LONG $0x387de2c4; BYTE $0xc4 // vpminsb ymm0, ymm0, ymm4 + LONG $0x386de2c4; BYTE $0xd5 // vpminsb ymm2, ymm2, ymm5 + LONG $0x3c75e2c4; BYTE $0xcc // vpmaxsb ymm1, ymm1, ymm4 + LONG $0x3c65e2c4; BYTE $0xdd // vpmaxsb ymm3, ymm3, ymm5 + LONG $0x387de2c4; BYTE $0xc6 // vpminsb ymm0, ymm0, ymm6 + LONG $0x386de2c4; BYTE $0xd7 // vpminsb ymm2, ymm2, ymm7 + LONG $0x3c75e2c4; BYTE $0xce // vpmaxsb ymm1, ymm1, ymm6 + LONG $0x3c65e2c4; BYTE $0xdf // vpmaxsb ymm3, ymm3, ymm7 + LONG $0x80e88348 // sub rax, -128 + LONG $0x02c68348 // add rsi, 2 + JNE LBB0_7 + LONG $0x01c0f641 // test r8b, 1 + JE LBB0_10 + +LBB0_9: + LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax] + LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32] + LONG $0x3c65e2c4; BYTE $0xdd // vpmaxsb ymm3, ymm3, ymm5 + LONG $0x3c75e2c4; BYTE $0xcc // vpmaxsb ymm1, ymm1, ymm4 + LONG $0x386de2c4; BYTE $0xd5 // vpminsb ymm2, ymm2, ymm5 + LONG $0x387de2c4; BYTE $0xc4 // vpminsb ymm0, ymm0, ymm4 + +LBB0_10: + LONG $0x3c75e2c4; BYTE $0xcb // vpmaxsb ymm1, ymm1, ymm3 + LONG $0x397de3c4; WORD $0x01cb // vextracti128 xmm3, ymm1, 1 + LONG $0x3c71e2c4; BYTE $0xcb // vpmaxsb xmm1, xmm1, xmm3 + LONG $0x4deff1c5; BYTE $0x40 // vpxor xmm1, xmm1, oword 64[rbp] /* [rip + .LCPI0_2] */ + LONG $0x387de2c4; BYTE $0xc2 // vpminsb ymm0, 
ymm0, ymm2 + LONG $0xd171e9c5; BYTE $0x08 // vpsrlw xmm2, xmm1, 8 + LONG $0xcadaf1c5 // vpminub xmm1, xmm1, xmm2 + LONG $0x4179e2c4; BYTE $0xc9 // vphminposuw xmm1, xmm1 + LONG $0x7e79c1c4; BYTE $0xc8 // vmovd r8d, xmm1 + LONG $0x7ff08041 // xor r8b, 127 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1 + LONG $0x3879e2c4; BYTE $0xc1 // vpminsb xmm0, xmm0, xmm1 + LONG $0x45eff9c5; BYTE $0x50 // vpxor xmm0, xmm0, oword 80[rbp] /* [rip + .LCPI0_3] */ + LONG $0xd071f1c5; BYTE $0x08 // vpsrlw xmm1, xmm0, 8 + LONG $0xc1daf9c5 // vpminub xmm0, xmm0, xmm1 + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0xc67ef9c5 // vmovd esi, xmm0 + LONG $0x80f68040 // xor sil, -128 + WORD $0x394d; BYTE $0xca // cmp r10, r9 + JE LBB0_12 + +LBB0_11: + LONG $0x04b60f42; BYTE $0x17 // movzx eax, byte [rdi + r10] + WORD $0x3840; BYTE $0xc6 // cmp sil, al + LONG $0xf6b60f40 // movzx esi, sil + WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax + WORD $0x3841; BYTE $0xc0 // cmp r8b, al + LONG $0xc0b60f45 // movzx r8d, r8b + LONG $0xc04c0f44 // cmovl r8d, eax + LONG $0x01c28349 // add r10, 1 + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB0_11 + +LBB0_12: + WORD $0x8844; BYTE $0x01 // mov byte [rcx], r8b + WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil + VZEROUPPER + RET + +LBB0_5: + LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI0_0] */ + LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI0_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1 + LONG $0x01c0f641 // test r8b, 1 + JNE LBB0_9 + JMP LBB0_10 + +TEXT ยท_uint8_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + + WORD $0xf685 // test esi, esi + JLE LBB1_1 + WORD $0x8941; BYTE $0xf1 // mov r9d, esi + WORD $0xfe83; BYTE $0x3f // cmp esi, 63 + JA LBB1_4 + WORD $0xb640; BYTE $0xff // mov sil, -1 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0xc031 // xor eax, eax + JMP LBB1_11 + +LBB1_1: + WORD $0xb640; BYTE $0xff // mov sil, -1 + WORD $0xc031 // xor eax, eax + JMP LBB1_12 + +LBB1_4: + WORD $0x8945; BYTE $0xca // mov r10d, r9d + LONG $0xc0e28341 // and r10d, -64 + LONG $0xc0428d49 // lea rax, [r10 - 64] + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x06e8c149 // shr r8, 6 + LONG $0x01c08349 // add r8, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB1_5 + WORD $0x894c; BYTE $0xc6 // mov rsi, r8 + LONG $0xfee68348 // and rsi, -2 + WORD $0xf748; BYTE $0xde // neg rsi + LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0 + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + WORD $0xc031 // xor eax, eax + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3 + +LBB1_7: + LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax] + LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32] + LONG $0x746ffec5; WORD $0x4007 // vmovdqu ymm6, yword [rdi + rax + 64] + LONG $0x7c6ffec5; WORD $0x6007 // vmovdqu ymm7, yword [rdi + rax + 96] + LONG $0xccdaf5c5 // vpminub ymm1, ymm1, ymm4 + LONG $0xd5daedc5 // vpminub ymm2, ymm2, ymm5 + LONG $0xc4defdc5 // vpmaxub ymm0, ymm0, ymm4 + LONG $0xdddee5c5 // vpmaxub ymm3, ymm3, ymm5 + LONG $0xcedaf5c5 // vpminub ymm1, ymm1, ymm6 + LONG $0xd7daedc5 // vpminub ymm2, ymm2, ymm7 + LONG $0xc6defdc5 // vpmaxub ymm0, ymm0, ymm6 + LONG $0xdfdee5c5 // vpmaxub ymm3, ymm3, ymm7 + LONG $0x80e88348 // sub rax, -128 + LONG $0x02c68348 // add rsi, 2 + JNE LBB1_7 + LONG 
$0x01c0f641 // test r8b, 1 + JE LBB1_10 + +LBB1_9: + LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax] + LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32] + LONG $0xdddee5c5 // vpmaxub ymm3, ymm3, ymm5 + LONG $0xc4defdc5 // vpmaxub ymm0, ymm0, ymm4 + LONG $0xd5daedc5 // vpminub ymm2, ymm2, ymm5 + LONG $0xccdaf5c5 // vpminub ymm1, ymm1, ymm4 + +LBB1_10: + LONG $0xcadaf5c5 // vpminub ymm1, ymm1, ymm2 + LONG $0xc3defdc5 // vpmaxub ymm0, ymm0, ymm3 + LONG $0x397de3c4; WORD $0x01c2 // vextracti128 xmm2, ymm0, 1 + LONG $0xc2def9c5 // vpmaxub xmm0, xmm0, xmm2 + LONG $0xd276e9c5 // vpcmpeqd xmm2, xmm2, xmm2 + LONG $0xc2eff9c5 // vpxor xmm0, xmm0, xmm2 + LONG $0xd071e9c5; BYTE $0x08 // vpsrlw xmm2, xmm0, 8 + LONG $0xc2daf9c5 // vpminub xmm0, xmm0, xmm2 + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0xc07ef9c5 // vmovd eax, xmm0 + WORD $0xd0f6 // not al + LONG $0x397de3c4; WORD $0x01c8 // vextracti128 xmm0, ymm1, 1 + LONG $0xc0daf1c5 // vpminub xmm0, xmm1, xmm0 + LONG $0xd071f1c5; BYTE $0x08 // vpsrlw xmm1, xmm0, 8 + LONG $0xc1daf9c5 // vpminub xmm0, xmm0, xmm1 + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0xc67ef9c5 // vmovd esi, xmm0 + WORD $0x394d; BYTE $0xca // cmp r10, r9 + JE LBB1_12 + +LBB1_11: + LONG $0x04b60f46; BYTE $0x17 // movzx r8d, byte [rdi + r10] + WORD $0x3844; BYTE $0xc6 // cmp sil, r8b + LONG $0xf6b60f40 // movzx esi, sil + LONG $0xf0430f41 // cmovae esi, r8d + WORD $0x3844; BYTE $0xc0 // cmp al, r8b + WORD $0xb60f; BYTE $0xc0 // movzx eax, al + LONG $0xc0460f41 // cmovbe eax, r8d + LONG $0x01c28349 // add r10, 1 + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB1_11 + +LBB1_12: + WORD $0x0188 // mov byte [rcx], al + WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil + VZEROUPPER + RET + +LBB1_5: + LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0 + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + WORD $0xc031 // xor eax, eax + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3 + LONG $0x01c0f641 // test r8b, 1 + JNE LBB1_9 + JMP LBB1_10 + +DATA LCDATA2<>+0x000(SB)/8, $0x8000800080008000 +DATA LCDATA2<>+0x008(SB)/8, $0x8000800080008000 +DATA LCDATA2<>+0x010(SB)/8, $0x8000800080008000 +DATA LCDATA2<>+0x018(SB)/8, $0x8000800080008000 +DATA LCDATA2<>+0x020(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x028(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x030(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x038(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x040(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x048(SB)/8, $0x7fff7fff7fff7fff +DATA LCDATA2<>+0x050(SB)/8, $0x8000800080008000 +DATA LCDATA2<>+0x058(SB)/8, $0x8000800080008000 +GLOBL LCDATA2<>(SB), 8, $96 + +TEXT ยท_int16_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + LEAQ LCDATA2<>(SB), BP + + WORD $0xf685 // test esi, esi + JLE LBB2_1 + WORD $0x8941; BYTE $0xf1 // mov r9d, esi + WORD $0xfe83; BYTE $0x1f // cmp esi, 31 + JA LBB2_4 + LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768 + LONG $0x7fffbe66 // mov si, 32767 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB2_11 + +LBB2_1: + LONG $0x7fffbe66 // mov si, 32767 + LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768 + JMP LBB2_12 + +LBB2_4: + WORD $0x8945; BYTE $0xca // mov r10d, r9d + LONG $0xe0e28341 // and r10d, -32 + LONG $0xe0428d49 // lea rax, [r10 - 32] + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x05e8c149 // shr r8, 5 + LONG $0x01c08349 // add r8, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + 
JE LBB2_5 + WORD $0x894c; BYTE $0xc6 // mov rsi, r8 + LONG $0xfee68348 // and rsi, -2 + WORD $0xf748; BYTE $0xde // neg rsi + LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI2_0] */ + LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI2_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1 + +LBB2_7: + LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax] + LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32] + LONG $0x746ffec5; WORD $0x4047 // vmovdqu ymm6, yword [rdi + 2*rax + 64] + LONG $0x7c6ffec5; WORD $0x6047 // vmovdqu ymm7, yword [rdi + 2*rax + 96] + LONG $0xc4eafdc5 // vpminsw ymm0, ymm0, ymm4 + LONG $0xd5eaedc5 // vpminsw ymm2, ymm2, ymm5 + LONG $0xcceef5c5 // vpmaxsw ymm1, ymm1, ymm4 + LONG $0xddeee5c5 // vpmaxsw ymm3, ymm3, ymm5 + LONG $0xc6eafdc5 // vpminsw ymm0, ymm0, ymm6 + LONG $0xd7eaedc5 // vpminsw ymm2, ymm2, ymm7 + LONG $0xceeef5c5 // vpmaxsw ymm1, ymm1, ymm6 + LONG $0xdfeee5c5 // vpmaxsw ymm3, ymm3, ymm7 + LONG $0x40c08348 // add rax, 64 + LONG $0x02c68348 // add rsi, 2 + JNE LBB2_7 + LONG $0x01c0f641 // test r8b, 1 + JE LBB2_10 + +LBB2_9: + LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax] + LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32] + LONG $0xddeee5c5 // vpmaxsw ymm3, ymm3, ymm5 + LONG $0xcceef5c5 // vpmaxsw ymm1, ymm1, ymm4 + LONG $0xd5eaedc5 // vpminsw ymm2, ymm2, ymm5 + LONG $0xc4eafdc5 // vpminsw ymm0, ymm0, ymm4 + +LBB2_10: + LONG $0xcbeef5c5 // vpmaxsw ymm1, ymm1, ymm3 + LONG $0x397de3c4; WORD $0x01cb // vextracti128 xmm3, ymm1, 1 + LONG $0xcbeef1c5 // vpmaxsw xmm1, xmm1, xmm3 + LONG $0x4deff1c5; BYTE $0x40 // vpxor xmm1, xmm1, oword 64[rbp] /* [rip + .LCPI2_2] */ + LONG $0xc2eafdc5 // vpminsw ymm0, ymm0, ymm2 + LONG $0x4179e2c4; BYTE $0xc9 // vphminposuw xmm1, xmm1 + LONG $0x7e79c1c4; BYTE $0xc8 // vmovd r8d, xmm1 + LONG $0xfff08141; WORD $0x007f; BYTE $0x00 // xor r8d, 32767 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1 + LONG $0xc1eaf9c5 // vpminsw xmm0, xmm0, xmm1 + LONG $0x45eff9c5; BYTE $0x50 // vpxor xmm0, xmm0, oword 80[rbp] /* [rip + .LCPI2_3] */ + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0xc67ef9c5 // vmovd esi, xmm0 + LONG $0x8000f681; WORD $0x0000 // xor esi, 32768 + WORD $0x394d; BYTE $0xca // cmp r10, r9 + JE LBB2_12 + +LBB2_11: + LONG $0x04b70f42; BYTE $0x57 // movzx eax, word [rdi + 2*r10] + WORD $0x3966; BYTE $0xc6 // cmp si, ax + WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax + LONG $0xc0394166 // cmp r8w, ax + LONG $0xc04c0f44 // cmovl r8d, eax + LONG $0x01c28349 // add r10, 1 + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB2_11 + +LBB2_12: + LONG $0x01894466 // mov word [rcx], r8w + WORD $0x8966; BYTE $0x32 // mov word [rdx], si + VZEROUPPER + RET + +LBB2_5: + LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI2_0] */ + LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI2_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1 + LONG $0x01c0f641 // test r8b, 1 + JNE LBB2_9 + JMP LBB2_10 + +TEXT ยท_uint16_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + + WORD $0xf685 // test esi, esi + JLE LBB3_1 + WORD $0x8941; BYTE $0xf1 // mov r9d, esi + WORD $0xfe83; BYTE $0x1f // cmp esi, 31 + JA LBB3_4 + LONG $0xffb84166; BYTE $0xff // mov 
r8w, -1 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0xf631 // xor esi, esi + JMP LBB3_11 + +LBB3_1: + LONG $0xffb84166; BYTE $0xff // mov r8w, -1 + WORD $0xf631 // xor esi, esi + JMP LBB3_12 + +LBB3_4: + WORD $0x8945; BYTE $0xca // mov r10d, r9d + LONG $0xe0e28341 // and r10d, -32 + LONG $0xe0428d49 // lea rax, [r10 - 32] + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x05e8c149 // shr r8, 5 + LONG $0x01c08349 // add r8, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB3_5 + WORD $0x894c; BYTE $0xc6 // mov rsi, r8 + LONG $0xfee68348 // and rsi, -2 + WORD $0xf748; BYTE $0xde // neg rsi + LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0 + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + WORD $0xc031 // xor eax, eax + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3 + +LBB3_7: + LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax] + LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32] + LONG $0x746ffec5; WORD $0x4047 // vmovdqu ymm6, yword [rdi + 2*rax + 64] + LONG $0x7c6ffec5; WORD $0x6047 // vmovdqu ymm7, yword [rdi + 2*rax + 96] + LONG $0x3a75e2c4; BYTE $0xcc // vpminuw ymm1, ymm1, ymm4 + LONG $0x3a6de2c4; BYTE $0xd5 // vpminuw ymm2, ymm2, ymm5 + LONG $0x3e7de2c4; BYTE $0xc4 // vpmaxuw ymm0, ymm0, ymm4 + LONG $0x3e65e2c4; BYTE $0xdd // vpmaxuw ymm3, ymm3, ymm5 + LONG $0x3a75e2c4; BYTE $0xce // vpminuw ymm1, ymm1, ymm6 + LONG $0x3a6de2c4; BYTE $0xd7 // vpminuw ymm2, ymm2, ymm7 + LONG $0x3e7de2c4; BYTE $0xc6 // vpmaxuw ymm0, ymm0, ymm6 + LONG $0x3e65e2c4; BYTE $0xdf // vpmaxuw ymm3, ymm3, ymm7 + LONG $0x40c08348 // add rax, 64 + LONG $0x02c68348 // add rsi, 2 + JNE LBB3_7 + LONG $0x01c0f641 // test r8b, 1 + JE LBB3_10 + +LBB3_9: + LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax] + LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32] + LONG $0x3e65e2c4; BYTE $0xdd // vpmaxuw ymm3, ymm3, ymm5 + LONG $0x3e7de2c4; BYTE $0xc4 // vpmaxuw ymm0, ymm0, ymm4 + LONG $0x3a6de2c4; BYTE $0xd5 // vpminuw ymm2, ymm2, ymm5 + LONG $0x3a75e2c4; BYTE $0xcc // vpminuw ymm1, ymm1, ymm4 + +LBB3_10: + LONG $0x3a75e2c4; BYTE $0xca // vpminuw ymm1, ymm1, ymm2 + LONG $0x3e7de2c4; BYTE $0xc3 // vpmaxuw ymm0, ymm0, ymm3 + LONG $0x397de3c4; WORD $0x01c2 // vextracti128 xmm2, ymm0, 1 + LONG $0x3e79e2c4; BYTE $0xc2 // vpmaxuw xmm0, xmm0, xmm2 + LONG $0xd276e9c5 // vpcmpeqd xmm2, xmm2, xmm2 + LONG $0xc2eff9c5 // vpxor xmm0, xmm0, xmm2 + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0xc67ef9c5 // vmovd esi, xmm0 + WORD $0xd6f7 // not esi + LONG $0x397de3c4; WORD $0x01c8 // vextracti128 xmm0, ymm1, 1 + LONG $0x3a71e2c4; BYTE $0xc0 // vpminuw xmm0, xmm1, xmm0 + LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0 + LONG $0x7e79c1c4; BYTE $0xc0 // vmovd r8d, xmm0 + WORD $0x394d; BYTE $0xca // cmp r10, r9 + JE LBB3_12 + +LBB3_11: + LONG $0x04b70f42; BYTE $0x57 // movzx eax, word [rdi + 2*r10] + LONG $0xc0394166 // cmp r8w, ax + LONG $0xc0430f44 // cmovae r8d, eax + WORD $0x3966; BYTE $0xc6 // cmp si, ax + WORD $0x460f; BYTE $0xf0 // cmovbe esi, eax + LONG $0x01c28349 // add r10, 1 + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB3_11 + +LBB3_12: + WORD $0x8966; BYTE $0x31 // mov word [rcx], si + LONG $0x02894466 // mov word [rdx], r8w + VZEROUPPER + RET + +LBB3_5: + LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0 + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + WORD $0xc031 // xor eax, eax + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3 + LONG 
$0x01c0f641 // test r8b, 1 + JNE LBB3_9 + JMP LBB3_10 + +DATA LCDATA3<>+0x000(SB)/8, $0x7fffffff80000000 +GLOBL LCDATA3<>(SB), 8, $8 + +TEXT ยท_int32_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + LEAQ LCDATA3<>(SB), BP + + WORD $0xf685 // test esi, esi + JLE LBB4_1 + WORD $0x8941; BYTE $0xf0 // mov r8d, esi + WORD $0xfe83; BYTE $0x1f // cmp esi, 31 + JA LBB4_4 + LONG $0x0000ba41; WORD $0x8000 // mov r10d, -2147483648 + LONG $0xffffffb8; BYTE $0x7f // mov eax, 2147483647 + WORD $0x3145; BYTE $0xc9 // xor r9d, r9d + JMP LBB4_7 + +LBB4_1: + LONG $0xffffffb8; BYTE $0x7f // mov eax, 2147483647 + LONG $0x000000be; BYTE $0x80 // mov esi, -2147483648 + JMP LBB4_8 + +LBB4_4: + WORD $0x8945; BYTE $0xc1 // mov r9d, r8d + LONG $0x587de2c4; WORD $0x0065 // vpbroadcastd ymm4, dword 0[rbp] /* [rip + .LCPI4_0] */ + LONG $0xe0e18341 // and r9d, -32 + LONG $0x587de2c4; WORD $0x0445 // vpbroadcastd ymm0, dword 4[rbp] /* [rip + .LCPI4_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xc86ffdc5 // vmovdqa ymm1, ymm0 + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xd86ffdc5 // vmovdqa ymm3, ymm0 + LONG $0xec6ffdc5 // vmovdqa ymm5, ymm4 + LONG $0xf46ffdc5 // vmovdqa ymm6, ymm4 + LONG $0xfc6ffdc5 // vmovdqa ymm7, ymm4 + +LBB4_5: + LONG $0x046f7ec5; BYTE $0x87 // vmovdqu ymm8, yword [rdi + 4*rax] + LONG $0x4c6f7ec5; WORD $0x2087 // vmovdqu ymm9, yword [rdi + 4*rax + 32] + LONG $0x546f7ec5; WORD $0x4087 // vmovdqu ymm10, yword [rdi + 4*rax + 64] + LONG $0x5c6f7ec5; WORD $0x6087 // vmovdqu ymm11, yword [rdi + 4*rax + 96] + LONG $0x397dc2c4; BYTE $0xc0 // vpminsd ymm0, ymm0, ymm8 + LONG $0x3975c2c4; BYTE $0xc9 // vpminsd ymm1, ymm1, ymm9 + LONG $0x396dc2c4; BYTE $0xd2 // vpminsd ymm2, ymm2, ymm10 + LONG $0x3965c2c4; BYTE $0xdb // vpminsd ymm3, ymm3, ymm11 + LONG $0x3d5dc2c4; BYTE $0xe0 // vpmaxsd ymm4, ymm4, ymm8 + LONG $0x3d55c2c4; BYTE $0xe9 // vpmaxsd ymm5, ymm5, ymm9 + LONG $0x3d4dc2c4; BYTE $0xf2 // vpmaxsd ymm6, ymm6, ymm10 + LONG $0x3d45c2c4; BYTE $0xfb // vpmaxsd ymm7, ymm7, ymm11 + LONG $0x20c08348 // add rax, 32 + WORD $0x3949; BYTE $0xc1 // cmp r9, rax + JNE LBB4_5 + LONG $0x3d5de2c4; BYTE $0xe5 // vpmaxsd ymm4, ymm4, ymm5 + LONG $0x3d5de2c4; BYTE $0xe6 // vpmaxsd ymm4, ymm4, ymm6 + LONG $0x3d5de2c4; BYTE $0xe7 // vpmaxsd ymm4, ymm4, ymm7 + LONG $0x397de3c4; WORD $0x01e5 // vextracti128 xmm5, ymm4, 1 + LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5 + LONG $0xec70f9c5; BYTE $0x4e // vpshufd xmm5, xmm4, 78 + LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5 + LONG $0xec70f9c5; BYTE $0xe5 // vpshufd xmm5, xmm4, 229 + LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5 + LONG $0x7e79c1c4; BYTE $0xe2 // vmovd r10d, xmm4 + LONG $0x397de2c4; BYTE $0xc1 // vpminsd ymm0, ymm0, ymm1 + LONG $0x397de2c4; BYTE $0xc2 // vpminsd ymm0, ymm0, ymm2 + LONG $0x397de2c4; BYTE $0xc3 // vpminsd ymm0, ymm0, ymm3 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1 + LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1 + LONG $0xc870f9c5; BYTE $0x4e // vpshufd xmm1, xmm0, 78 + LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1 + LONG $0xc870f9c5; BYTE $0xe5 // vpshufd xmm1, xmm0, 229 + LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1 + LONG $0xc07ef9c5 // vmovd eax, xmm0 + WORD $0x8944; BYTE $0xd6 // mov esi, r10d + WORD $0x394d; BYTE $0xc1 // cmp r9, r8 + JE LBB4_8 + +LBB4_7: + LONG $0x8f348b42 // mov esi, dword [rdi + 4*r9] + WORD $0xf039 // cmp eax, esi + WORD $0x4f0f; BYTE $0xc6 // cmovg eax, esi + 
WORD $0x3941; BYTE $0xf2 // cmp r10d, esi + LONG $0xf24d0f41 // cmovge esi, r10d + LONG $0x01c18349 // add r9, 1 + WORD $0x8941; BYTE $0xf2 // mov r10d, esi + WORD $0x394d; BYTE $0xc8 // cmp r8, r9 + JNE LBB4_7 + +LBB4_8: + WORD $0x3189 // mov dword [rcx], esi + WORD $0x0289 // mov dword [rdx], eax + VZEROUPPER + RET + +TEXT ยท_uint32_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + + WORD $0xf685 // test esi, esi + JLE LBB5_1 + WORD $0x8941; BYTE $0xf0 // mov r8d, esi + WORD $0xfe83; BYTE $0x1f // cmp esi, 31 + JA LBB5_4 + WORD $0x3145; BYTE $0xc9 // xor r9d, r9d + LONG $0xffffffb8; BYTE $0xff // mov eax, -1 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB5_7 + +LBB5_1: + LONG $0xffffffb8; BYTE $0xff // mov eax, -1 + WORD $0xf631 // xor esi, esi + JMP LBB5_8 + +LBB5_4: + WORD $0x8945; BYTE $0xc1 // mov r9d, r8d + LONG $0xe0e18341 // and r9d, -32 + LONG $0xe4efd9c5 // vpxor xmm4, xmm4, xmm4 + LONG $0xc076fdc5 // vpcmpeqd ymm0, ymm0, ymm0 + WORD $0xc031 // xor eax, eax + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xdb76e5c5 // vpcmpeqd ymm3, ymm3, ymm3 + LONG $0xedefd1c5 // vpxor xmm5, xmm5, xmm5 + LONG $0xf6efc9c5 // vpxor xmm6, xmm6, xmm6 + LONG $0xffefc1c5 // vpxor xmm7, xmm7, xmm7 + +LBB5_5: + LONG $0x046f7ec5; BYTE $0x87 // vmovdqu ymm8, yword [rdi + 4*rax] + LONG $0x4c6f7ec5; WORD $0x2087 // vmovdqu ymm9, yword [rdi + 4*rax + 32] + LONG $0x546f7ec5; WORD $0x4087 // vmovdqu ymm10, yword [rdi + 4*rax + 64] + LONG $0x5c6f7ec5; WORD $0x6087 // vmovdqu ymm11, yword [rdi + 4*rax + 96] + LONG $0x3b7dc2c4; BYTE $0xc0 // vpminud ymm0, ymm0, ymm8 + LONG $0x3b75c2c4; BYTE $0xc9 // vpminud ymm1, ymm1, ymm9 + LONG $0x3b6dc2c4; BYTE $0xd2 // vpminud ymm2, ymm2, ymm10 + LONG $0x3b65c2c4; BYTE $0xdb // vpminud ymm3, ymm3, ymm11 + LONG $0x3f5dc2c4; BYTE $0xe0 // vpmaxud ymm4, ymm4, ymm8 + LONG $0x3f55c2c4; BYTE $0xe9 // vpmaxud ymm5, ymm5, ymm9 + LONG $0x3f4dc2c4; BYTE $0xf2 // vpmaxud ymm6, ymm6, ymm10 + LONG $0x3f45c2c4; BYTE $0xfb // vpmaxud ymm7, ymm7, ymm11 + LONG $0x20c08348 // add rax, 32 + WORD $0x3949; BYTE $0xc1 // cmp r9, rax + JNE LBB5_5 + LONG $0x3f5de2c4; BYTE $0xe5 // vpmaxud ymm4, ymm4, ymm5 + LONG $0x3f5de2c4; BYTE $0xe6 // vpmaxud ymm4, ymm4, ymm6 + LONG $0x3f5de2c4; BYTE $0xe7 // vpmaxud ymm4, ymm4, ymm7 + LONG $0x397de3c4; WORD $0x01e5 // vextracti128 xmm5, ymm4, 1 + LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5 + LONG $0xec70f9c5; BYTE $0x4e // vpshufd xmm5, xmm4, 78 + LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5 + LONG $0xec70f9c5; BYTE $0xe5 // vpshufd xmm5, xmm4, 229 + LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5 + LONG $0x7e79c1c4; BYTE $0xe2 // vmovd r10d, xmm4 + LONG $0x3b7de2c4; BYTE $0xc1 // vpminud ymm0, ymm0, ymm1 + LONG $0x3b7de2c4; BYTE $0xc2 // vpminud ymm0, ymm0, ymm2 + LONG $0x3b7de2c4; BYTE $0xc3 // vpminud ymm0, ymm0, ymm3 + LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1 + LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1 + LONG $0xc870f9c5; BYTE $0x4e // vpshufd xmm1, xmm0, 78 + LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1 + LONG $0xc870f9c5; BYTE $0xe5 // vpshufd xmm1, xmm0, 229 + LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1 + LONG $0xc07ef9c5 // vmovd eax, xmm0 + WORD $0x8944; BYTE $0xd6 // mov esi, r10d + WORD $0x394d; BYTE $0xc1 // cmp r9, r8 + JE LBB5_8 + +LBB5_7: + LONG $0x8f348b42 // mov esi, dword [rdi + 4*r9] + WORD $0xf039 // cmp eax, 
esi + WORD $0x430f; BYTE $0xc6 // cmovae eax, esi + WORD $0x3941; BYTE $0xf2 // cmp r10d, esi + LONG $0xf2470f41 // cmova esi, r10d + LONG $0x01c18349 // add r9, 1 + WORD $0x8941; BYTE $0xf2 // mov r10d, esi + WORD $0x394d; BYTE $0xc8 // cmp r8, r9 + JNE LBB5_7 + +LBB5_8: + WORD $0x3189 // mov dword [rcx], esi + WORD $0x0289 // mov dword [rdx], eax + VZEROUPPER + RET + +DATA LCDATA4<>+0x000(SB)/8, $0x8000000000000000 +DATA LCDATA4<>+0x008(SB)/8, $0x7fffffffffffffff +GLOBL LCDATA4<>(SB), 8, $16 + +TEXT ยท_int64_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + LEAQ LCDATA4<>(SB), BP + + QUAD $0xffffffffffffb848; WORD $0x7fff // mov rax, 9223372036854775807 + WORD $0xf685 // test esi, esi + JLE LBB6_1 + WORD $0x8941; BYTE $0xf0 // mov r8d, esi + WORD $0xfe83; BYTE $0x0f // cmp esi, 15 + JA LBB6_4 + LONG $0x01508d4c // lea r10, [rax + 1] + WORD $0x3145; BYTE $0xc9 // xor r9d, r9d + JMP LBB6_7 + +LBB6_1: + LONG $0x01708d48 // lea rsi, [rax + 1] + JMP LBB6_8 + +LBB6_4: + WORD $0x8945; BYTE $0xc1 // mov r9d, r8d + LONG $0x597de2c4; WORD $0x0065 // vpbroadcastq ymm4, qword 0[rbp] /* [rip + .LCPI6_0] */ + LONG $0xf0e18341 // and r9d, -16 + LONG $0x597de2c4; WORD $0x0845 // vpbroadcastq ymm0, qword 8[rbp] /* [rip + .LCPI6_1] */ + WORD $0xc031 // xor eax, eax + LONG $0xd86ffdc5 // vmovdqa ymm3, ymm0 + LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0 + LONG $0xc86ffdc5 // vmovdqa ymm1, ymm0 + LONG $0xfc6ffdc5 // vmovdqa ymm7, ymm4 + LONG $0xf46ffdc5 // vmovdqa ymm6, ymm4 + LONG $0xec6ffdc5 // vmovdqa ymm5, ymm4 + +LBB6_5: + LONG $0x046f7ec5; BYTE $0xc7 // vmovdqu ymm8, yword [rdi + 8*rax] + LONG $0x373d62c4; BYTE $0xc8 // vpcmpgtq ymm9, ymm8, ymm0 + LONG $0x4b3de3c4; WORD $0x90c0 // vblendvpd ymm0, ymm8, ymm0, ymm9 + LONG $0x4c6f7ec5; WORD $0x20c7 // vmovdqu ymm9, yword [rdi + 8*rax + 32] + LONG $0x373562c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm9, ymm3 + LONG $0x4b35e3c4; WORD $0xa0db // vblendvpd ymm3, ymm9, ymm3, ymm10 + LONG $0x546f7ec5; WORD $0x40c7 // vmovdqu ymm10, yword [rdi + 8*rax + 64] + LONG $0x372d62c4; BYTE $0xda // vpcmpgtq ymm11, ymm10, ymm2 + LONG $0x4b2de3c4; WORD $0xb0d2 // vblendvpd ymm2, ymm10, ymm2, ymm11 + LONG $0x5c6f7ec5; WORD $0x60c7 // vmovdqu ymm11, yword [rdi + 8*rax + 96] + LONG $0x372562c4; BYTE $0xe1 // vpcmpgtq ymm12, ymm11, ymm1 + LONG $0x4b25e3c4; WORD $0xc0c9 // vblendvpd ymm1, ymm11, ymm1, ymm12 + LONG $0x375d42c4; BYTE $0xe0 // vpcmpgtq ymm12, ymm4, ymm8 + LONG $0x4b3de3c4; WORD $0xc0e4 // vblendvpd ymm4, ymm8, ymm4, ymm12 + LONG $0x374542c4; BYTE $0xc1 // vpcmpgtq ymm8, ymm7, ymm9 + LONG $0x4b35e3c4; WORD $0x80ff // vblendvpd ymm7, ymm9, ymm7, ymm8 + LONG $0x374d42c4; BYTE $0xc2 // vpcmpgtq ymm8, ymm6, ymm10 + LONG $0x4b2de3c4; WORD $0x80f6 // vblendvpd ymm6, ymm10, ymm6, ymm8 + LONG $0x375542c4; BYTE $0xc3 // vpcmpgtq ymm8, ymm5, ymm11 + LONG $0x4b25e3c4; WORD $0x80ed // vblendvpd ymm5, ymm11, ymm5, ymm8 + LONG $0x10c08348 // add rax, 16 + WORD $0x3949; BYTE $0xc1 // cmp r9, rax + JNE LBB6_5 + LONG $0x375d62c4; BYTE $0xc7 // vpcmpgtq ymm8, ymm4, ymm7 + LONG $0x4b45e3c4; WORD $0x80e4 // vblendvpd ymm4, ymm7, ymm4, ymm8 + LONG $0x375de2c4; BYTE $0xfe // vpcmpgtq ymm7, ymm4, ymm6 + LONG $0x4b4de3c4; WORD $0x70e4 // vblendvpd ymm4, ymm6, ymm4, ymm7 + LONG $0x375de2c4; BYTE $0xf5 // vpcmpgtq ymm6, ymm4, ymm5 + LONG $0x4b55e3c4; WORD $0x60e4 // vblendvpd ymm4, ymm5, ymm4, ymm6 + LONG $0x197de3c4; WORD $0x01e5 // vextractf128 xmm5, ymm4, 1 + LONG $0x3759e2c4; BYTE $0xf5 // vpcmpgtq xmm6, xmm4, xmm5 + LONG 
$0x4b51e3c4; WORD $0x60e4 // vblendvpd xmm4, xmm5, xmm4, xmm6 + LONG $0x0479e3c4; WORD $0x4eec // vpermilps xmm5, xmm4, 78 + LONG $0x3759e2c4; BYTE $0xf5 // vpcmpgtq xmm6, xmm4, xmm5 + LONG $0x4b51e3c4; WORD $0x60e4 // vblendvpd xmm4, xmm5, xmm4, xmm6 + LONG $0x7ef9c1c4; BYTE $0xe2 // vmovq r10, xmm4 + LONG $0x3765e2c4; BYTE $0xe0 // vpcmpgtq ymm4, ymm3, ymm0 + LONG $0x4b65e3c4; WORD $0x40c0 // vblendvpd ymm0, ymm3, ymm0, ymm4 + LONG $0x376de2c4; BYTE $0xd8 // vpcmpgtq ymm3, ymm2, ymm0 + LONG $0x4b6de3c4; WORD $0x30c0 // vblendvpd ymm0, ymm2, ymm0, ymm3 + LONG $0x3775e2c4; BYTE $0xd0 // vpcmpgtq ymm2, ymm1, ymm0 + LONG $0x4b75e3c4; WORD $0x20c0 // vblendvpd ymm0, ymm1, ymm0, ymm2 + LONG $0x197de3c4; WORD $0x01c1 // vextractf128 xmm1, ymm0, 1 + LONG $0x3771e2c4; BYTE $0xd0 // vpcmpgtq xmm2, xmm1, xmm0 + LONG $0x4b71e3c4; WORD $0x20c0 // vblendvpd xmm0, xmm1, xmm0, xmm2 + LONG $0x0479e3c4; WORD $0x4ec8 // vpermilps xmm1, xmm0, 78 + LONG $0x3771e2c4; BYTE $0xd0 // vpcmpgtq xmm2, xmm1, xmm0 + LONG $0x4b71e3c4; WORD $0x20c0 // vblendvpd xmm0, xmm1, xmm0, xmm2 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq rax, xmm0 + WORD $0x894c; BYTE $0xd6 // mov rsi, r10 + WORD $0x394d; BYTE $0xc1 // cmp r9, r8 + JE LBB6_8 + +LBB6_7: + LONG $0xcf348b4a // mov rsi, qword [rdi + 8*r9] + WORD $0x3948; BYTE $0xf0 // cmp rax, rsi + LONG $0xc64f0f48 // cmovg rax, rsi + WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + LONG $0xf24d0f49 // cmovge rsi, r10 + LONG $0x01c18349 // add r9, 1 + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + WORD $0x394d; BYTE $0xc8 // cmp r8, r9 + JNE LBB6_7 + +LBB6_8: + WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi + WORD $0x8948; BYTE $0x02 // mov qword [rdx], rax + VZEROUPPER + RET + +DATA LCDATA5<>+0x000(SB)/8, $0x8000000000000000 +GLOBL LCDATA5<>(SB), 8, $8 + +TEXT ยท_uint64_max_min_avx2(SB), $0-32 + + MOVQ values+0(FP), DI + MOVQ length+8(FP), SI + MOVQ minout+16(FP), DX + MOVQ maxout+24(FP), CX + LEAQ LCDATA5<>(SB), BP + + WORD $0xf685 // test esi, esi + JLE LBB7_1 + WORD $0x8941; BYTE $0xf0 // mov r8d, esi + WORD $0xfe83; BYTE $0x0f // cmp esi, 15 + JA LBB7_4 + LONG $0xffc0c748; WORD $0xffff; BYTE $0xff // mov rax, -1 + WORD $0x3145; BYTE $0xc9 // xor r9d, r9d + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB7_7 + +LBB7_1: + LONG $0xffc0c748; WORD $0xffff; BYTE $0xff // mov rax, -1 + WORD $0xf631 // xor esi, esi + JMP LBB7_8 + +LBB7_4: + WORD $0x8945; BYTE $0xc1 // mov r9d, r8d + LONG $0xf0e18341 // and r9d, -16 + LONG $0xedefd1c5 // vpxor xmm5, xmm5, xmm5 + LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1 + WORD $0xc031 // xor eax, eax + LONG $0x597de2c4; WORD $0x0045 // vpbroadcastq ymm0, qword 0[rbp] /* [rip + .LCPI7_0] */ + LONG $0xe476ddc5 // vpcmpeqd ymm4, ymm4, ymm4 + LONG $0xdb76e5c5 // vpcmpeqd ymm3, ymm3, ymm3 + LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2 + LONG $0xef3941c4; BYTE $0xc0 // vpxor xmm8, xmm8, xmm8 + LONG $0xffefc1c5 // vpxor xmm7, xmm7, xmm7 + LONG $0xf6efc9c5 // vpxor xmm6, xmm6, xmm6 + +LBB7_5: + LONG $0x0c6f7ec5; BYTE $0xc7 // vmovdqu ymm9, yword [rdi + 8*rax] + LONG $0xd0ef75c5 // vpxor ymm10, ymm1, ymm0 + LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0 + LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10 + LONG $0x4b35e3c4; WORD $0xa0c9 // vblendvpd ymm1, ymm9, ymm1, ymm10 + LONG $0xd0ef55c5 // vpxor ymm10, ymm5, ymm0 + LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11 + LONG $0x4b35e3c4; WORD $0xa0ed // vblendvpd ymm5, ymm9, ymm5, ymm10 + LONG $0x4c6f7ec5; WORD $0x20c7 // vmovdqu ymm9, yword [rdi + 8*rax + 32] + LONG $0xd0ef5dc5 // vpxor ymm10, 
ymm4, ymm0 + LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0 + LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10 + LONG $0x4b35e3c4; WORD $0xa0e4 // vblendvpd ymm4, ymm9, ymm4, ymm10 + LONG $0xd0ef3dc5 // vpxor ymm10, ymm8, ymm0 + LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11 + LONG $0x5c6f7ec5; WORD $0x40c7 // vmovdqu ymm11, yword [rdi + 8*rax + 64] + LONG $0x4b3543c4; WORD $0xa0c0 // vblendvpd ymm8, ymm9, ymm8, ymm10 + LONG $0xc8ef65c5 // vpxor ymm9, ymm3, ymm0 + LONG $0xd0ef25c5 // vpxor ymm10, ymm11, ymm0 + LONG $0x372d42c4; BYTE $0xc9 // vpcmpgtq ymm9, ymm10, ymm9 + LONG $0x4b25e3c4; WORD $0x90db // vblendvpd ymm3, ymm11, ymm3, ymm9 + LONG $0xc8ef45c5 // vpxor ymm9, ymm7, ymm0 + LONG $0x373542c4; BYTE $0xca // vpcmpgtq ymm9, ymm9, ymm10 + LONG $0x4b25e3c4; WORD $0x90ff // vblendvpd ymm7, ymm11, ymm7, ymm9 + LONG $0x4c6f7ec5; WORD $0x60c7 // vmovdqu ymm9, yword [rdi + 8*rax + 96] + LONG $0xd0ef6dc5 // vpxor ymm10, ymm2, ymm0 + LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0 + LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10 + LONG $0x4b35e3c4; WORD $0xa0d2 // vblendvpd ymm2, ymm9, ymm2, ymm10 + LONG $0xd0ef4dc5 // vpxor ymm10, ymm6, ymm0 + LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11 + LONG $0x4b35e3c4; WORD $0xa0f6 // vblendvpd ymm6, ymm9, ymm6, ymm10 + LONG $0x10c08348 // add rax, 16 + WORD $0x3949; BYTE $0xc1 // cmp r9, rax + JNE LBB7_5 + LONG $0xc8ef3dc5 // vpxor ymm9, ymm8, ymm0 + LONG $0xd0ef55c5 // vpxor ymm10, ymm5, ymm0 + LONG $0x372d42c4; BYTE $0xc9 // vpcmpgtq ymm9, ymm10, ymm9 + LONG $0x4b3de3c4; WORD $0x90ed // vblendvpd ymm5, ymm8, ymm5, ymm9 + LONG $0xc05755c5 // vxorpd ymm8, ymm5, ymm0 + LONG $0xc8ef45c5 // vpxor ymm9, ymm7, ymm0 + LONG $0x373d42c4; BYTE $0xc1 // vpcmpgtq ymm8, ymm8, ymm9 + LONG $0x4b45e3c4; WORD $0x80ed // vblendvpd ymm5, ymm7, ymm5, ymm8 + LONG $0xf857d5c5 // vxorpd ymm7, ymm5, ymm0 + LONG $0xc0ef4dc5 // vpxor ymm8, ymm6, ymm0 + LONG $0x3745c2c4; BYTE $0xf8 // vpcmpgtq ymm7, ymm7, ymm8 + LONG $0x4b4de3c4; WORD $0x70ed // vblendvpd ymm5, ymm6, ymm5, ymm7 + LONG $0x197de3c4; WORD $0x01ee // vextractf128 xmm6, ymm5, 1 + LONG $0xc05749c5 // vxorpd xmm8, xmm6, xmm0 + LONG $0xf857d1c5 // vxorpd xmm7, xmm5, xmm0 + LONG $0x3741c2c4; BYTE $0xf8 // vpcmpgtq xmm7, xmm7, xmm8 + LONG $0x4b49e3c4; WORD $0x70ed // vblendvpd xmm5, xmm6, xmm5, xmm7 + LONG $0x0479e3c4; WORD $0x4ef5 // vpermilps xmm6, xmm5, 78 + LONG $0xc05751c5 // vxorpd xmm8, xmm5, xmm0 + LONG $0xf857c9c5 // vxorpd xmm7, xmm6, xmm0 + LONG $0x3739e2c4; BYTE $0xff // vpcmpgtq xmm7, xmm8, xmm7 + LONG $0x4b49e3c4; WORD $0x70ed // vblendvpd xmm5, xmm6, xmm5, xmm7 + LONG $0xf0eff5c5 // vpxor ymm6, ymm1, ymm0 + LONG $0xf8efddc5 // vpxor ymm7, ymm4, ymm0 + LONG $0x3745e2c4; BYTE $0xf6 // vpcmpgtq ymm6, ymm7, ymm6 + LONG $0x4b5de3c4; WORD $0x60c9 // vblendvpd ymm1, ymm4, ymm1, ymm6 + LONG $0xe057f5c5 // vxorpd ymm4, ymm1, ymm0 + LONG $0xf0efe5c5 // vpxor ymm6, ymm3, ymm0 + LONG $0x374de2c4; BYTE $0xe4 // vpcmpgtq ymm4, ymm6, ymm4 + LONG $0x4b65e3c4; WORD $0x40c9 // vblendvpd ymm1, ymm3, ymm1, ymm4 + LONG $0x7ef9c1c4; BYTE $0xea // vmovq r10, xmm5 + LONG $0xd857f5c5 // vxorpd ymm3, ymm1, ymm0 + LONG $0xe0efedc5 // vpxor ymm4, ymm2, ymm0 + LONG $0x375de2c4; BYTE $0xdb // vpcmpgtq ymm3, ymm4, ymm3 + LONG $0x4b6de3c4; WORD $0x30c9 // vblendvpd ymm1, ymm2, ymm1, ymm3 + LONG $0x197de3c4; WORD $0x01ca // vextractf128 xmm2, ymm1, 1 + LONG $0xd857f1c5 // vxorpd xmm3, xmm1, xmm0 + LONG $0xe057e9c5 // vxorpd xmm4, xmm2, xmm0 + LONG $0x3759e2c4; BYTE $0xdb // vpcmpgtq xmm3, 
xmm4, xmm3 + LONG $0x4b69e3c4; WORD $0x30c9 // vblendvpd xmm1, xmm2, xmm1, xmm3 + LONG $0x0479e3c4; WORD $0x4ed1 // vpermilps xmm2, xmm1, 78 + LONG $0xd857f1c5 // vxorpd xmm3, xmm1, xmm0 + LONG $0xc057e9c5 // vxorpd xmm0, xmm2, xmm0 + LONG $0x3779e2c4; BYTE $0xc3 // vpcmpgtq xmm0, xmm0, xmm3 + LONG $0x4b69e3c4; WORD $0x00c1 // vblendvpd xmm0, xmm2, xmm1, xmm0 + LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq rax, xmm0 + WORD $0x894c; BYTE $0xd6 // mov rsi, r10 + WORD $0x394d; BYTE $0xc1 // cmp r9, r8 + JE LBB7_8 + +LBB7_7: + LONG $0xcf348b4a // mov rsi, qword [rdi + 8*r9] + WORD $0x3948; BYTE $0xf0 // cmp rax, rsi + LONG $0xc6430f48 // cmovae rax, rsi + WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + LONG $0xf2470f49 // cmova rsi, r10 + LONG $0x01c18349 // add r9, 1 + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + WORD $0x394d; BYTE $0xc8 // cmp r8, r9 + JNE LBB7_7 + +LBB7_8: + WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi + WORD $0x8948; BYTE $0x02 // mov qword [rdx], rax + VZEROUPPER + RET diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.go new file mode 100644 index 00000000..f9d3c44e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.go @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +import "unsafe" + +// This file contains convenience functions for utilizing Arm64 Neon intrinsics to quickly +// and efficiently get the min and max from an integral slice. 
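Each trampoline below hands `&values[0]` straight to the assembly, so a zero-length slice panics on the index expression before the assembly's own length check can run; callers are evidently expected to guard. A sketch of such a guard follows, with a plain loop standing in for the dispatched call; the function name is hypothetical.

```go
package main

import "fmt"

// safeMinMaxInt32 guards a trampoline-style entry point: taking
// &values[0] on an empty slice panics in Go, so the length check must
// live on the Go side, not in the assembly.
func safeMinMaxInt32(values []int32) (min, max int32, ok bool) {
	if len(values) == 0 {
		return 0, 0, false
	}
	// A dispatched implementation would be called here instead.
	min, max = values[0], values[0]
	for _, v := range values[1:] {
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	return min, max, true
}

func main() {
	lo, hi, ok := safeMinMaxInt32([]int32{5, 1, 9})
	fmt.Println(lo, hi, ok) // 1 9 true
}
```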
+ +//go:noescape +func _int32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int32MaxMinNEON(values []int32) (min, max int32) { + _int32_max_min_neon(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint32MaxMinNEON(values []uint32) (min, max uint32) { + _uint32_max_min_neon(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int64MaxMinNEON(values []int64) (min, max int64) { + _int64_max_min_neon(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint64MaxMinNEON(values []uint64) (min, max uint64) { + _uint64_max_min_neon(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.s b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.s new file mode 100644 index 00000000..b679bb6e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.s @@ -0,0 +1,324 @@ +//+build !noasm !appengine + +// ARROW-15336 +// (C2GOASM doesn't work correctly for Arm64) +// Partly GENERATED BY asm2plan9s. + + +// func _int32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) +TEXT ยท_int32_max_min_neon(SB), $0-32 + + MOVD values+0(FP), R0 + MOVD length+8(FP), R1 + MOVD minout+16(FP), R2 + MOVD maxout+24(FP), R3 + + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0x7100043f // cmp w1, #1 + WORD $0x910003fd // mov x29, sp + BLT LBB0_3 + + WORD $0x71000c3f // cmp w1, #3 + WORD $0x2a0103e8 // mov w8, w1 + BHI LBB0_4 + + WORD $0xaa1f03e9 // mov x9, xzr + WORD $0x52b0000b // mov w11, #-2147483648 + WORD $0x12b0000a // mov w10, #2147483647 + JMP LBB0_7 +LBB0_3: + WORD $0x12b0000a // mov w10, #2147483647 + WORD $0x52b0000b // mov w11, #-2147483648 + WORD $0xb900006b // str w11, [x3] + WORD $0xb900004a // str w10, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET +LBB0_4: + WORD $0x927e7509 // and x9, x8, #0xfffffffc + WORD $0x9100200a // add x10, x0, #8 + WORD $0x0f046402 // movi v2.2s, #128, lsl #24 + WORD $0x2f046400 // mvni v0.2s, #128, lsl #24 + WORD $0x2f046401 // mvni v1.2s, #128, lsl #24 + WORD $0xaa0903eb // mov x11, x9 + WORD $0x0f046403 // movi v3.2s, #128, lsl #24 +LBB0_5: + WORD $0x6d7f9544 // ldp d4, d5, [x10, #-8] + WORD $0xf100116b // subs x11, x11, #4 + WORD $0x9100414a // add x10, x10, #16 + WORD $0x0ea46c00 // smin v0.2s, v0.2s, v4.2s + WORD $0x0ea56c21 // smin v1.2s, v1.2s, v5.2s + WORD $0x0ea46442 // smax v2.2s, v2.2s, v4.2s + WORD $0x0ea56463 // smax v3.2s, v3.2s, v5.2s + BNE LBB0_5 + + WORD $0x0ea36442 // smax v2.2s, v2.2s, v3.2s + WORD $0x0ea16c00 // smin v0.2s, v0.2s, v1.2s + WORD $0x0e0c0441 // dup v1.2s, v2.s[1] + WORD $0x0e0c0403 // dup v3.2s, v0.s[1] + WORD $0x0ea16441 // smax v1.2s, v2.2s, v1.2s + WORD $0x0ea36c00 // smin v0.2s, v0.2s, v3.2s + WORD $0xeb08013f // cmp x9, x8 + WORD $0x1e26002b // fmov w11, s1 + WORD $0x1e26000a // fmov w10, s0 + BEQ LBB0_9 +LBB0_7: + WORD $0x8b09080c // add x12, x0, x9, lsl #2 + WORD $0xcb090108 // sub x8, x8, x9 +LBB0_8: + WORD $0xb8404589 // ldr w9, [x12], #4 + WORD $0x6b09015f // cmp w10, w9 + WORD $0x1a89b14a // csel w10, w10, w9, lt + WORD $0x6b09017f // cmp w11, w9 + WORD $0x1a89c16b // csel w11, w11, w9, gt + WORD $0xf1000508 // subs x8, x8, #1 + BNE LBB0_8 +LBB0_9: + WORD $0xb900006b // str w11, [x3] + WORD $0xb900004a // str w10, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET + +// func _uint32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) +TEXT ยท_uint32_max_min_neon(SB), $0-32 + + MOVD values+0(FP), R0 + MOVD length+8(FP), R1 + MOVD minout+16(FP), R2 + MOVD maxout+24(FP), R3 + + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0x7100043f // cmp w1, #1 + WORD $0x910003fd // mov x29, sp + BLT LBB1_3 + + WORD $0x71000c3f // cmp w1, #3 + WORD $0x2a0103e8 // mov w8, w1 + BHI LBB1_4 + + WORD $0xaa1f03e9 // mov x9, xzr + WORD $0x2a1f03ea // mov w10, wzr + WORD $0x1280000b // mov w11, #-1 + JMP LBB1_7 +LBB1_3: + WORD $0x2a1f03ea // mov w10, wzr + WORD $0x1280000b // mov w11, #-1 + WORD $0xb900006a // str w10, [x3] + WORD $0xb900004b // str w11, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET +LBB1_4: + WORD $0x927e7509 // and x9, x8, #0xfffffffc + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x6f07e7e0 // movi v0.2d, #0xffffffffffffffff + WORD $0x9100200a // add x10, x0, #8 + WORD $0x6f07e7e2 // movi v2.2d, #0xffffffffffffffff + WORD $0xaa0903eb // mov x11, x9 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 +LBB1_5: + WORD $0x6d7f9544 // ldp d4, d5, [x10, #-8] + WORD $0xf100116b // subs x11, x11, #4 + WORD $0x9100414a // add x10, x10, #16 + WORD $0x2ea46c00 // umin v0.2s, v0.2s, v4.2s + WORD $0x2ea56c42 // umin v2.2s, v2.2s, v5.2s + WORD $0x2ea46421 // umax v1.2s, v1.2s, v4.2s + WORD $0x2ea56463 // umax v3.2s, v3.2s, v5.2s + BNE LBB1_5 + + WORD $0x2ea36421 // umax v1.2s, v1.2s, v3.2s + WORD $0x2ea26c00 // umin v0.2s, v0.2s, v2.2s + WORD $0x0e0c0422 // dup v2.2s, v1.s[1] + WORD $0x0e0c0403 // dup v3.2s, v0.s[1] + WORD $0x2ea26421 // umax v1.2s, v1.2s, v2.2s + WORD $0x2ea36c00 // umin v0.2s, v0.2s, v3.2s + WORD $0xeb08013f // cmp x9, x8 + WORD $0x1e26002a // fmov w10, s1 + WORD $0x1e26000b // fmov w11, s0 + BEQ LBB1_9 +LBB1_7: + WORD $0x8b09080c // add x12, x0, x9, lsl #2 + WORD $0xcb090108 // sub x8, x8, x9 +LBB1_8: + WORD $0xb8404589 // ldr w9, [x12], #4 + WORD $0x6b09017f // cmp w11, w9 + WORD $0x1a89316b // csel w11, w11, w9, lo + WORD $0x6b09015f // cmp w10, w9 + WORD $0x1a89814a // csel w10, w10, w9, hi + WORD $0xf1000508 // subs x8, x8, #1 + BNE LBB1_8 +LBB1_9: + WORD $0xb900006a // str w10, [x3] + WORD $0xb900004b // str w11, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET + +// func _int64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) +TEXT ยท_int64_max_min_neon(SB), $0-32 + + MOVD values+0(FP), R0 + MOVD length+8(FP), R1 + MOVD minout+16(FP), R2 + MOVD maxout+24(FP), R3 + + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0x7100043f // cmp w1, #1 + WORD $0x910003fd // mov x29, sp + BLT LBB2_3 + + WORD $0x2a0103e8 // mov w8, w1 + WORD $0xd2f0000b // mov x11, #-9223372036854775808 + WORD $0x71000c3f // cmp w1, #3 + WORD $0x92f0000a // mov x10, #9223372036854775807 + BHI LBB2_4 + + WORD $0xaa1f03e9 // mov x9, xzr + JMP LBB2_7 +LBB2_3: + WORD $0x92f0000a // mov x10, #9223372036854775807 + WORD $0xd2f0000b // mov x11, #-9223372036854775808 + WORD $0xf900006b // str x11, [x3] + WORD $0xf900004a // str x10, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET +LBB2_4: + WORD $0x927e7509 // and x9, x8, #0xfffffffc + WORD $0x4e080d61 // dup v1.2d, x11 + WORD $0x4e080d40 // dup v0.2d, x10 + WORD $0x9100400a // add x10, x0, #16 + WORD $0xaa0903eb // mov x11, x9 + WORD $0x4ea01c02 // mov v2.16b, v0.16b + WORD $0x4ea11c23 // mov v3.16b, v1.16b +LBB2_5: + WORD $0xad7f9544 // ldp q4, q5, [x10, #-16] + WORD $0x4ea31c66 // mov v6.16b, v3.16b + WORD $0x4ea11c27 // mov v7.16b, v1.16b + WORD $0x4ea21c43 // mov v3.16b, v2.16b + WORD $0x4ea01c01 // mov v1.16b, v0.16b + WORD $0x4ee03480 // cmgt v0.2d, v4.2d, v0.2d + WORD $0x4ee234a2 // cmgt v2.2d, v5.2d, v2.2d + WORD $0x6e641c20 // bsl v0.16b, v1.16b, v4.16b + WORD $0x4ee434e1 // cmgt v1.2d, v7.2d, v4.2d + WORD $0x6e651c62 // bsl v2.16b, v3.16b, v5.16b + WORD $0x4ee534c3 // cmgt v3.2d, v6.2d, v5.2d + WORD $0xf100116b // subs x11, x11, #4 + WORD $0x6e641ce1 // bsl v1.16b, v7.16b, v4.16b + WORD $0x6e651cc3 // bsl v3.16b, v6.16b, v5.16b + WORD $0x9100814a // add x10, x10, #32 + BNE LBB2_5 + + WORD $0x4ee33424 // cmgt v4.2d, v1.2d, v3.2d + WORD $0x4ee03445 // cmgt v5.2d, v2.2d, v0.2d + WORD $0x6e631c24 // bsl v4.16b, v1.16b, v3.16b + WORD $0x6e621c05 // bsl v5.16b, v0.16b, v2.16b + WORD $0x4e180480 // dup v0.2d, v4.d[1] + WORD $0x4e1804a1 // dup v1.2d, v5.d[1] + WORD $0x4ee03482 // cmgt v2.2d, v4.2d, v0.2d + WORD $0x4ee53423 // cmgt v3.2d, v1.2d, v5.2d + WORD $0x6e601c82 // bsl v2.16b, v4.16b, v0.16b + WORD $0x6e611ca3 // bsl v3.16b, v5.16b, v1.16b + WORD $0xeb08013f // cmp x9, x8 + WORD $0x9e66004b // fmov x11, d2 + WORD $0x9e66006a // fmov x10, d3 + BEQ LBB2_9 +LBB2_7: + WORD $0x8b090c0c // add x12, x0, x9, lsl #3 + WORD $0xcb090108 // sub x8, x8, x9 +LBB2_8: + WORD $0xf8408589 // ldr x9, [x12], #8 + WORD $0xeb09015f // cmp x10, x9 + WORD $0x9a89b14a // csel x10, x10, x9, lt + WORD $0xeb09017f // cmp x11, x9 + WORD $0x9a89c16b // csel x11, x11, x9, gt + WORD $0xf1000508 // subs x8, x8, #1 + BNE LBB2_8 +LBB2_9: + WORD $0xf900006b // str x11, [x3] + WORD $0xf900004a // str x10, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET + + +// func _uint64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) +TEXT ยท_uint64_max_min_neon(SB), $0-32 + + MOVD values+0(FP), R0 + MOVD length+8(FP), R1 + MOVD minout+16(FP), R2 + MOVD maxout+24(FP), R3 + + WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
+ WORD $0x7100043f // cmp w1, #1 + WORD $0x910003fd // mov x29, sp + BLT LBB3_3 + + WORD $0x71000c3f // cmp w1, #3 + WORD $0x2a0103e8 // mov w8, w1 + BHI LBB3_4 + + WORD $0xaa1f03e9 // mov x9, xzr + WORD $0xaa1f03ea // mov x10, xzr + WORD $0x9280000b // mov x11, #-1 + JMP LBB3_7 +LBB3_3: + WORD $0xaa1f03ea // mov x10, xzr + WORD $0x9280000b // mov x11, #-1 + WORD $0xf900006a // str x10, [x3] + WORD $0xf900004b // str x11, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET +LBB3_4: + WORD $0x927e7509 // and x9, x8, #0xfffffffc + WORD $0x9100400a // add x10, x0, #16 + WORD $0x6f00e401 // movi v1.2d, #0000000000000000 + WORD $0x6f07e7e0 // movi v0.2d, #0xffffffffffffffff + WORD $0x6f07e7e2 // movi v2.2d, #0xffffffffffffffff + WORD $0xaa0903eb // mov x11, x9 + WORD $0x6f00e403 // movi v3.2d, #0000000000000000 +LBB3_5: + WORD $0xad7f9544 // ldp q4, q5, [x10, #-16] + WORD $0x4ea31c66 // mov v6.16b, v3.16b + WORD $0x4ea11c27 // mov v7.16b, v1.16b + WORD $0x4ea21c43 // mov v3.16b, v2.16b + WORD $0x4ea01c01 // mov v1.16b, v0.16b + WORD $0x6ee03480 // cmhi v0.2d, v4.2d, v0.2d + WORD $0x6ee234a2 // cmhi v2.2d, v5.2d, v2.2d + WORD $0x6e641c20 // bsl v0.16b, v1.16b, v4.16b + WORD $0x6ee434e1 // cmhi v1.2d, v7.2d, v4.2d + WORD $0x6e651c62 // bsl v2.16b, v3.16b, v5.16b + WORD $0x6ee534c3 // cmhi v3.2d, v6.2d, v5.2d + WORD $0xf100116b // subs x11, x11, #4 + WORD $0x6e641ce1 // bsl v1.16b, v7.16b, v4.16b + WORD $0x6e651cc3 // bsl v3.16b, v6.16b, v5.16b + WORD $0x9100814a // add x10, x10, #32 + BNE LBB3_5 + + WORD $0x6ee33424 // cmhi v4.2d, v1.2d, v3.2d + WORD $0x6ee03445 // cmhi v5.2d, v2.2d, v0.2d + WORD $0x6e631c24 // bsl v4.16b, v1.16b, v3.16b + WORD $0x6e621c05 // bsl v5.16b, v0.16b, v2.16b + WORD $0x4e180480 // dup v0.2d, v4.d[1] + WORD $0x4e1804a1 // dup v1.2d, v5.d[1] + WORD $0x6ee03482 // cmhi v2.2d, v4.2d, v0.2d + WORD $0x6ee53423 // cmhi v3.2d, v1.2d, v5.2d + WORD $0x6e601c82 // bsl v2.16b, v4.16b, v0.16b + WORD $0x6e611ca3 // bsl v3.16b, v5.16b, v1.16b + WORD $0xeb08013f // cmp x9, x8 + WORD $0x9e66004a // fmov x10, d2 + WORD $0x9e66006b // fmov x11, d3 + BEQ LBB3_9 +LBB3_7: + WORD $0x8b090c0c // add x12, x0, x9, lsl #3 + WORD $0xcb090108 // sub x8, x8, x9 +LBB3_8: + WORD $0xf8408589 // ldr x9, [x12], #8 + WORD $0xeb09017f // cmp x11, x9 + WORD $0x9a89316b // csel x11, x11, x9, lo + WORD $0xeb09015f // cmp x10, x9 + WORD $0x9a89814a // csel x10, x10, x9, hi + WORD $0xf1000508 // subs x8, x8, #1 + BNE LBB3_8 +LBB3_9: + WORD $0xf900006a // str x10, [x3] + WORD $0xf900004b // str x11, [x2] + WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + RET + diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_noasm.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_noasm.go new file mode 100644 index 00000000..19c24b59 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_noasm.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +func init() { + minmaxFuncs.i8 = int8MinMax + minmaxFuncs.ui8 = uint8MinMax + minmaxFuncs.i16 = int16MinMax + minmaxFuncs.ui16 = uint16MinMax + minmaxFuncs.i32 = int32MinMax + minmaxFuncs.ui32 = uint32MinMax + minmaxFuncs.i64 = int64MinMax + minmaxFuncs.ui64 = uint64MinMax +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_ppc64le.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_ppc64le.go new file mode 100644 index 00000000..ffd2db00 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_ppc64le.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +func init() { + minmaxFuncs.i8 = int8MinMax + minmaxFuncs.ui8 = uint8MinMax + minmaxFuncs.i16 = int16MinMax + minmaxFuncs.ui16 = uint16MinMax + minmaxFuncs.i32 = int32MinMax + minmaxFuncs.ui32 = uint32MinMax + minmaxFuncs.i64 = int64MinMax + minmaxFuncs.ui64 = uint64MinMax +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_s390x.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_s390x.go new file mode 100644 index 00000000..ffd2db00 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_s390x.go @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm + +package utils + +func init() { + minmaxFuncs.i8 = int8MinMax + minmaxFuncs.ui8 = uint8MinMax + minmaxFuncs.i16 = int16MinMax + minmaxFuncs.ui16 = uint16MinMax + minmaxFuncs.i32 = int32MinMax + minmaxFuncs.ui32 = uint32MinMax + minmaxFuncs.i64 = int64MinMax + minmaxFuncs.ui64 = uint64MinMax +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.go new file mode 100644 index 00000000..1e12a8d1 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.go @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +import "unsafe" + +// This file contains convenience functions for utilizing SSE4 intrinsics to quickly +// and efficiently get the min and max from an integral slice. + +//go:noescape +func _int8_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int8MaxMinSSE4(values []int8) (min, max int8) { + _int8_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint8_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint8MaxMinSSE4(values []uint8) (min, max uint8) { + _uint8_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int16_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int16MaxMinSSE4(values []int16) (min, max int16) { + _int16_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint16_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint16MaxMinSSE4(values []uint16) (min, max uint16) { + _uint16_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int32_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int32MaxMinSSE4(values []int32) (min, max int32) { + _int32_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _uint32_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func uint32MaxMinSSE4(values []uint32) (min, max uint32) { + _uint32_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max)) + return +} + +//go:noescape +func _int64_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) + +func int64MaxMinSSE4(values []int64) (min, max int64) { + 
_int64_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _uint64_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func uint64MaxMinSSE4(values []uint64) (min, max uint64) {
+ _uint64_max_min_sse4(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.s
new file mode 100644
index 00000000..8f1eccf6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.s
@@ -0,0 +1,1044 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+DATA LCDATA1<>+0x000(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x008(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x010(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x018(SB)/8, $0x7f7f7f7f7f7f7f7f
+GLOBL LCDATA1<>(SB), 8, $32
+
+TEXT ·_int8_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA1<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB0_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB0_4
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB0_11
+
+LBB0_1:
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ JMP LBB0_12
+
+LBB0_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xe0e38341 // and r11d, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB0_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB0_7:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x746f0ff3; WORD $0x2007 // movdqu xmm6, oword [rdi + rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3007 // movdqu xmm7, oword [rdi + rax + 48]
+ LONG $0x38380f66; BYTE $0xc4 // pminsb xmm0, xmm4
+ LONG $0x38380f66; BYTE $0xd5 // pminsb xmm2, xmm5
+ LONG $0x3c380f66; BYTE $0xcc // pmaxsb xmm1, xmm4
+ LONG $0x3c380f66; BYTE $0xdd // pmaxsb xmm3, xmm5
+ LONG $0x38380f66; BYTE $0xc6 // pminsb xmm0, xmm6
+ LONG $0x38380f66; BYTE $0xd7 // pminsb xmm2, xmm7
+ LONG $0x3c380f66; BYTE $0xce // pmaxsb xmm1, xmm6
+ LONG $0x3c380f66; BYTE $0xdf // pmaxsb xmm3, xmm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB0_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB0_10
+
+LBB0_9:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x3c380f66; BYTE $0xdd // pmaxsb xmm3, xmm5
+ LONG $0x3c380f66; BYTE $0xcc // pmaxsb xmm1, xmm4
+ LONG $0x38380f66; BYTE $0xd5 // pminsb xmm2, xmm5
+ LONG $0x38380f66; BYTE $0xc4 // pminsb xmm0, xmm4
+
+LBB0_10:
+ LONG $0x38380f66; BYTE $0xc2 // pminsb xmm0, xmm2
+ LONG $0x3c380f66; BYTE $0xcb // pmaxsb xmm1, xmm3
+ LONG $0x4def0f66; BYTE $0x10 // pxor xmm1, oword 16[rbp] /* [rip + .LCPI0_1] */
+ LONG $0xd16f0f66 // movdqa xmm2, xmm1
+ LONG $0xd2710f66; BYTE $0x08 // psrlw xmm2, 8
+ LONG $0xd1da0f66 // pminub xmm2, xmm1
+ LONG $0x41380f66; BYTE $0xca // phminposuw xmm1, xmm2
+ LONG $0x7e0f4166; BYTE $0xc8 // movd r8d, xmm1
+ LONG $0x7ff08041 // xor r8b, 127
+ LONG $0x45ef0f66; BYTE $0x00 // pxor xmm0, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0xc86f0f66 // movdqa xmm1, xmm0
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xc8da0f66 // pminub xmm1, xmm0
+ LONG $0x41380f66; BYTE $0xc1 // phminposuw xmm0, xmm1
+ LONG $0xc67e0f66 // movd esi, xmm0
+ LONG $0x80f68040 // xor sil, -128
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB0_12
+
+LBB0_11:
+ LONG $0x04b60f42; BYTE $0x1f // movzx eax, byte [rdi + r11]
+ WORD $0x3840; BYTE $0xc6 // cmp sil, al
+ LONG $0xf6b60f40 // movzx esi, sil
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ WORD $0x3841; BYTE $0xc0 // cmp r8b, al
+ LONG $0xc0b60f45 // movzx r8d, r8b
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB0_11
+
+LBB0_12:
+ WORD $0x8844; BYTE $0x01 // mov byte [rcx], r8b
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ RET
+
+LBB0_5:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB0_9
+ JMP LBB0_10
+
+TEXT ·_uint8_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB1_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB1_4
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_11
+
+LBB1_1:
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_12
+
+LBB1_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xe0e38341 // and r11d, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB1_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB1_7:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x746f0ff3; WORD $0x2007 // movdqu xmm6, oword [rdi + rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3007 // movdqu xmm7, oword [rdi + rax + 48]
+ LONG $0xc4da0f66 // pminub xmm0, xmm4
+ LONG $0xd5da0f66 // pminub xmm2, xmm5
+ LONG $0xccde0f66 // pmaxub xmm1, xmm4
+ LONG $0xddde0f66 // pmaxub xmm3, xmm5
+ LONG $0xc6da0f66 // pminub xmm0, xmm6
+ LONG $0xd7da0f66 // pminub xmm2, xmm7
+ LONG $0xcede0f66 // pmaxub xmm1, xmm6
+ LONG $0xdfde0f66 // pmaxub xmm3, xmm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB1_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB1_10
+
+LBB1_9:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0xddde0f66 // pmaxub xmm3, xmm5
+ LONG $0xccde0f66 // pmaxub xmm1, xmm4
+ LONG $0xd5da0f66 // pminub xmm2, xmm5
+ LONG $0xc4da0f66 // pminub xmm0, xmm4
+
+LBB1_10:
+ LONG $0xc2da0f66 // pminub xmm0, xmm2
+ LONG $0xcbde0f66 // pmaxub xmm1, xmm3
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xd1ef0f66 // pxor xmm2, xmm1
+ LONG $0xca6f0f66 // movdqa xmm1, xmm2
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xcada0f66 // pminub xmm1, xmm2
+ LONG $0x41380f66; BYTE $0xc9 // phminposuw xmm1, xmm1
+ LONG $0xc87e0f66 // movd eax, xmm1
+ WORD $0xd0f6 // not al
+ LONG $0xc86f0f66 // movdqa xmm1, xmm0
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xc8da0f66 // pminub xmm1, xmm0
+ LONG $0x41380f66; BYTE $0xc1 // phminposuw xmm0, xmm1
+ LONG $0xc67e0f66 // movd esi, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB1_12
+
+LBB1_11:
+ LONG $0x04b60f46; BYTE $0x1f // movzx r8d, byte [rdi + r11]
+ WORD $0x3844; BYTE $0xc6 // cmp sil, r8b
+ LONG $0xf6b60f40 // movzx esi, sil
+ LONG $0xf0430f41 // cmovae esi, r8d
+ WORD $0x3844; BYTE $0xc0 // cmp al, r8b
+ WORD $0xb60f; BYTE $0xc0 // movzx eax, al
+ LONG $0xc0460f41 // cmovbe eax, r8d
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB1_11
+
+LBB1_12:
+ WORD $0x0188 // mov byte [rcx], al
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ RET
+
+LBB1_5:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB1_9
+ JMP LBB1_10
+
+DATA LCDATA2<>+0x000(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x008(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x010(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x018(SB)/8, $0x7fff7fff7fff7fff
+GLOBL LCDATA2<>(SB), 8, $32
+
+TEXT ·_int16_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA2<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB2_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB2_4
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ LONG $0x7fffbe66 // mov si, 32767
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB2_11
+
+LBB2_1:
+ LONG $0x7fffbe66 // mov si, 32767
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ JMP LBB2_12
+
+LBB2_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf0e38341 // and r11d, -16
+ LONG $0xf0438d49 // lea rax, [r11 - 16]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x04e8c149 // shr r8, 4
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB2_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB2_7:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2047 // movdqu xmm6, oword [rdi + 2*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3047 // movdqu xmm7, oword [rdi + 2*rax + 48]
+ LONG $0xc4ea0f66 // pminsw xmm0, xmm4
+ LONG $0xd5ea0f66 // pminsw xmm2, xmm5
+ LONG $0xccee0f66 // pmaxsw xmm1, xmm4
+ LONG $0xddee0f66 // pmaxsw xmm3, xmm5
+ LONG $0xc6ea0f66 // pminsw xmm0, xmm6
+ LONG $0xd7ea0f66 // pminsw xmm2, xmm7
+ LONG $0xceee0f66 // pmaxsw xmm1, xmm6
+ LONG $0xdfee0f66 // pmaxsw xmm3, xmm7
+ LONG $0x20c08348 // add rax, 32
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB2_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB2_10
+
+LBB2_9:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0xddee0f66 // pmaxsw xmm3, xmm5
+ LONG $0xccee0f66 // pmaxsw xmm1, xmm4
+ LONG $0xd5ea0f66 // pminsw xmm2, xmm5
+ LONG $0xc4ea0f66 // pminsw xmm0, xmm4
+
+LBB2_10:
+ LONG $0xc2ea0f66 // pminsw xmm0, xmm2
+ LONG $0xcbee0f66 // pmaxsw xmm1, xmm3
+ LONG $0x4def0f66; BYTE $0x10 // pxor xmm1, oword 16[rbp] /* [rip + .LCPI2_1] */
+ LONG $0x41380f66; BYTE $0xc9 // phminposuw xmm1, xmm1
+ LONG $0x7e0f4166; BYTE $0xc8 // movd r8d, xmm1
+ LONG $0xfff08141; WORD $0x007f; BYTE $0x00 // xor r8d, 32767
+ LONG $0x45ef0f66; BYTE $0x00 // pxor xmm0, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x41380f66; BYTE $0xc0 // phminposuw xmm0, xmm0
+ LONG $0xc67e0f66 // movd esi, xmm0
+ LONG $0x8000f681; WORD $0x0000 // xor esi, 32768
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB2_12
+
+LBB2_11:
+ LONG $0x04b70f42; BYTE $0x5f // movzx eax, word [rdi + 2*r11]
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB2_11
+
+LBB2_12:
+ LONG $0x01894466 // mov word [rcx], r8w
+ WORD $0x8966; BYTE $0x32 // mov word [rdx], si
+ RET
+
+LBB2_5:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB2_9
+ JMP LBB2_10
+
+TEXT ·_uint16_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB3_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB3_4
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_11
+
+LBB3_1:
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_12
+
+LBB3_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf0e38341 // and r11d, -16
+ LONG $0xf0438d49 // lea rax, [r11 - 16]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x04e8c149 // shr r8, 4
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB3_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB3_7:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2047 // movdqu xmm6, oword [rdi + 2*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3047 // movdqu xmm7, oword [rdi + 2*rax + 48]
+ LONG $0x3a380f66; BYTE $0xc4 // pminuw xmm0, xmm4
+ LONG $0x3a380f66; BYTE $0xd5 // pminuw xmm2, xmm5
+ LONG $0x3e380f66; BYTE $0xcc // pmaxuw xmm1, xmm4
+ LONG $0x3e380f66; BYTE $0xdd // pmaxuw xmm3, xmm5
+ LONG $0x3a380f66; BYTE $0xc6 // pminuw xmm0, xmm6
+ LONG $0x3a380f66; BYTE $0xd7 // pminuw xmm2, xmm7
+ LONG $0x3e380f66; BYTE $0xce // pmaxuw xmm1, xmm6
+ LONG $0x3e380f66; BYTE $0xdf // pmaxuw xmm3, xmm7
+ LONG $0x20c08348 // add rax, 32
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB3_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB3_10
+
+LBB3_9:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x3e380f66; BYTE $0xdd // pmaxuw xmm3, xmm5
+ LONG $0x3e380f66; BYTE $0xcc // pmaxuw xmm1, xmm4
+ LONG $0x3a380f66; BYTE $0xd5 // pminuw xmm2, xmm5
+ LONG $0x3a380f66; BYTE $0xc4 // pminuw xmm0, xmm4
+
+LBB3_10:
+ LONG $0x3a380f66; BYTE $0xc2 // pminuw xmm0, xmm2
+ LONG $0x3e380f66; BYTE $0xcb // pmaxuw xmm1, xmm3
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xd1ef0f66 // pxor xmm2, xmm1
+ LONG $0x41380f66; BYTE $0xca // phminposuw xmm1, xmm2
+ LONG $0xce7e0f66 // movd esi, xmm1
+ WORD $0xd6f7 // not esi
+ LONG $0x41380f66; BYTE $0xc0 // phminposuw xmm0, xmm0
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB3_12
+
+LBB3_11:
+ LONG $0x04b70f42; BYTE $0x5f // movzx eax, word [rdi + 2*r11]
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc0430f44 // cmovae r8d, eax
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x460f; BYTE $0xf0 // cmovbe esi, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB3_11
+
+LBB3_12:
+ WORD $0x8966; BYTE $0x31 // mov word [rcx], si
+ LONG $0x02894466 // mov word [rdx], r8w
+ RET
+
+LBB3_5:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB3_9
+ JMP LBB3_10
+
+DATA LCDATA3<>+0x000(SB)/8, $0x8000000080000000
+DATA LCDATA3<>+0x008(SB)/8, $0x8000000080000000
+DATA LCDATA3<>+0x010(SB)/8, $0x7fffffff7fffffff
+DATA LCDATA3<>+0x018(SB)/8, $0x7fffffff7fffffff
+GLOBL LCDATA3<>(SB), 8, $32
+
+TEXT ·_int32_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA3<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB4_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x07 // cmp esi, 7
+ JA LBB4_6
+ LONG $0x000000b8; BYTE $0x80 // mov eax, -2147483648
+ LONG $0xffffb841; WORD $0x7fff // mov r8d, 2147483647
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB4_4
+
+LBB4_1:
+ LONG $0xffffb841; WORD $0x7fff // mov r8d, 2147483647
+ LONG $0x000000b8; BYTE $0x80 // mov eax, -2147483648
+ JMP LBB4_13
+
+LBB4_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf8e38341 // and r11d, -8
+ LONG $0xf8438d49 // lea rax, [r11 - 8]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x03e8c149 // shr r8, 3
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB4_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI4_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI4_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB4_9:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2087 // movdqu xmm6, oword [rdi + 4*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3087 // movdqu xmm7, oword [rdi + 4*rax + 48]
+ LONG $0x39380f66; BYTE $0xc4 // pminsd xmm0, xmm4
+ LONG $0x39380f66; BYTE $0xd5 // pminsd xmm2, xmm5
+ LONG $0x3d380f66; BYTE $0xcc // pmaxsd xmm1, xmm4
+ LONG $0x3d380f66; BYTE $0xdd // pmaxsd xmm3, xmm5
+ LONG $0x39380f66; BYTE $0xc6 // pminsd xmm0, xmm6
+ LONG $0x39380f66; BYTE $0xd7 // pminsd xmm2, xmm7
+ LONG $0x3d380f66; BYTE $0xce // pmaxsd xmm1, xmm6
+ LONG $0x3d380f66; BYTE $0xdf // pmaxsd xmm3, xmm7
+ LONG $0x10c08348 // add rax, 16
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB4_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB4_12
+
+LBB4_11:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x3d380f66; BYTE $0xdd // pmaxsd xmm3, xmm5
+ LONG $0x3d380f66; BYTE $0xcc // pmaxsd xmm1, xmm4
+ LONG $0x39380f66; BYTE $0xd5 // pminsd xmm2, xmm5
+ LONG $0x39380f66; BYTE $0xc4 // pminsd xmm0, xmm4
+
+LBB4_12:
+ LONG $0x39380f66; BYTE $0xc2 // pminsd xmm0, xmm2
+ LONG $0x3d380f66; BYTE $0xcb // pmaxsd xmm1, xmm3
+ LONG $0xd1700f66; BYTE $0x4e // pshufd xmm2, xmm1, 78
+ LONG $0x3d380f66; BYTE $0xd1 // pmaxsd xmm2, xmm1
+ LONG $0xca700f66; BYTE $0xe5 // pshufd xmm1, xmm2, 229
+ LONG $0x3d380f66; BYTE $0xca // pmaxsd xmm1, xmm2
+ LONG $0xc87e0f66 // movd eax, xmm1
+ LONG $0xc8700f66; BYTE $0x4e // pshufd xmm1, xmm0, 78
+ LONG $0x39380f66; BYTE $0xc8 // pminsd xmm1, xmm0
+ LONG $0xc1700f66; BYTE $0xe5 // pshufd xmm0, xmm1, 229
+ LONG $0x39380f66; BYTE $0xc1 // pminsd xmm0, xmm1
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB4_13
+
+LBB4_4:
+ WORD $0xc689 // mov esi, eax
+
+LBB4_5:
+ LONG $0x9f048b42 // mov eax, dword [rdi + 4*r11]
+ WORD $0x3941; BYTE $0xc0 // cmp r8d, eax
+ LONG $0xc04f0f44 // cmovg r8d, eax
+ WORD $0xc639 // cmp esi, eax
+ WORD $0x4d0f; BYTE $0xc6 // cmovge eax, esi
+ LONG $0x01c38349 // add r11, 1
+ WORD $0xc689 // mov esi, eax
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB4_5
+
+LBB4_13:
+ WORD $0x0189 // mov dword [rcx], eax
+ WORD $0x8944; BYTE $0x02 // mov dword [rdx], r8d
+ RET
+
+LBB4_7:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI4_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI4_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB4_11
+ JMP LBB4_12
+
+TEXT ·_uint32_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB5_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x07 // cmp esi, 7
+ JA LBB5_6
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ LONG $0xffffb841; WORD $0xffff // mov r8d, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB5_4
+
+LBB5_1:
+ LONG $0xffffb841; WORD $0xffff // mov r8d, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB5_13
+
+LBB5_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf8e38341 // and r11d, -8
+ LONG $0xf8438d49 // lea rax, [r11 - 8]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x03e8c149 // shr r8, 3
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB5_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB5_9:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2087 // movdqu xmm6, oword [rdi + 4*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3087 // movdqu xmm7, oword [rdi + 4*rax + 48]
+ LONG $0x3b380f66; BYTE $0xc4 // pminud xmm0, xmm4
+ LONG $0x3b380f66; BYTE $0xd5 // pminud xmm2, xmm5
+ LONG $0x3f380f66; BYTE $0xcc // pmaxud xmm1, xmm4
+ LONG $0x3f380f66; BYTE $0xdd // pmaxud xmm3, xmm5
+ LONG $0x3b380f66; BYTE $0xc6 // pminud xmm0, xmm6
+ LONG $0x3b380f66; BYTE $0xd7 // pminud xmm2, xmm7
+ LONG $0x3f380f66; BYTE $0xce // pmaxud xmm1, xmm6
+ LONG $0x3f380f66; BYTE $0xdf // pmaxud xmm3, xmm7
+ LONG $0x10c08348 // add rax, 16
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB5_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB5_12
+
+LBB5_11:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x3f380f66; BYTE $0xdd // pmaxud xmm3, xmm5
+ LONG $0x3f380f66; BYTE $0xcc // pmaxud xmm1, xmm4
+ LONG $0x3b380f66; BYTE $0xd5 // pminud xmm2, xmm5
+ LONG $0x3b380f66; BYTE $0xc4 // pminud xmm0, xmm4
+
+LBB5_12:
+ LONG $0x3b380f66; BYTE $0xc2 // pminud xmm0, xmm2
+ LONG $0x3f380f66; BYTE $0xcb // pmaxud xmm1, xmm3
+ LONG $0xd1700f66; BYTE $0x4e // pshufd xmm2, xmm1, 78
+ LONG $0x3f380f66; BYTE $0xd1 // pmaxud xmm2, xmm1
+ LONG $0xca700f66; BYTE $0xe5 // pshufd xmm1, xmm2, 229
+ LONG $0x3f380f66; BYTE $0xca // pmaxud xmm1, xmm2
+ LONG $0xce7e0f66 // movd esi, xmm1
+ LONG $0xc8700f66; BYTE $0x4e // pshufd xmm1, xmm0, 78
+ LONG $0x3b380f66; BYTE $0xc8 // pminud xmm1, xmm0
+ LONG $0xc1700f66; BYTE $0xe5 // pshufd xmm0, xmm1, 229
+ LONG $0x3b380f66; BYTE $0xc1 // pminud xmm0, xmm1
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB5_13
+
+LBB5_4:
+ WORD $0xf089 // mov eax, esi
+
+LBB5_5:
+ LONG $0x9f348b42 // mov esi, dword [rdi + 4*r11]
+ WORD $0x3941; BYTE $0xf0 // cmp r8d, esi
+ LONG $0xc6430f44 // cmovae r8d, esi
+ WORD $0xf039 // cmp eax, esi
+ WORD $0x470f; BYTE $0xf0 // cmova esi, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0xf089 // mov eax, esi
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB5_5
+
+LBB5_13:
+ WORD $0x3189 // mov dword [rcx], esi
+ WORD $0x8944; BYTE $0x02 // mov dword [rdx], r8d
+ RET
+
+LBB5_7:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB5_11
+ JMP LBB5_12
+
+DATA LCDATA4<>+0x000(SB)/8, $0x8000000000000000
+DATA LCDATA4<>+0x008(SB)/8, $0x8000000000000000
+DATA LCDATA4<>+0x010(SB)/8, $0x7fffffffffffffff
+DATA LCDATA4<>+0x018(SB)/8, $0x7fffffffffffffff
+GLOBL LCDATA4<>(SB), 8, $32
+
+TEXT ·_int64_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA4<>(SB), BP
+
+ QUAD $0xffffffffffffb849; WORD $0x7fff // mov r8, 9223372036854775807
+ WORD $0xf685 // test esi, esi
+ JLE LBB6_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD 
$0xfe83; BYTE $0x03 // cmp esi, 3 + JA LBB6_6 + LONG $0x01708d49 // lea rsi, [r8 + 1] + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + JMP LBB6_4 + +LBB6_1: + LONG $0x01708d49 // lea rsi, [r8 + 1] + JMP LBB6_13 + +LBB6_6: + WORD $0x8945; BYTE $0xcb // mov r11d, r9d + LONG $0xfce38341 // and r11d, -4 + LONG $0xfc438d49 // lea rax, [r11 - 4] + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x02e8c149 // shr r8, 2 + LONG $0x01c08349 // add r8, 1 + WORD $0x8548; BYTE $0xc0 // test rax, rax + JE LBB6_7 + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0xf749; BYTE $0xda // neg r10 + LONG $0x6f0f4466; WORD $0x004d // movdqa xmm9, oword 0[rbp] /* [rip + .LCPI6_0] */ + LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI6_1] */ + WORD $0xc031 // xor eax, eax + LONG $0x6f0f4166; BYTE $0xd0 // movdqa xmm2, xmm8 + LONG $0x6f0f4166; BYTE $0xf1 // movdqa xmm6, xmm9 + +LBB6_9: + LONG $0x3c6f0ff3; BYTE $0xc7 // movdqu xmm7, oword [rdi + 8*rax] + LONG $0xc76f0f66 // movdqa xmm0, xmm7 + LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8 + LONG $0xe76f0f66 // movdqa xmm4, xmm7 + LONG $0x380f4166; WORD $0xe015 // blendvpd xmm4, xmm8, xmm0 + LONG $0x4c6f0ff3; WORD $0x10c7 // movdqu xmm1, oword [rdi + 8*rax + 16] + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0xe96f0f66 // movdqa xmm5, xmm1 + LONG $0x15380f66; BYTE $0xea // blendvpd xmm5, xmm2, xmm0 + LONG $0x6f0f4166; BYTE $0xc1 // movdqa xmm0, xmm9 + LONG $0x37380f66; BYTE $0xc7 // pcmpgtq xmm0, xmm7 + LONG $0x380f4166; WORD $0xf915 // blendvpd xmm7, xmm9, xmm0 + LONG $0xc66f0f66 // movdqa xmm0, xmm6 + LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1 + LONG $0x15380f66; BYTE $0xce // blendvpd xmm1, xmm6, xmm0 + LONG $0x5c6f0ff3; WORD $0x20c7 // movdqu xmm3, oword [rdi + 8*rax + 32] + LONG $0xc36f0f66 // movdqa xmm0, xmm3 + LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4 + LONG $0x6f0f4466; BYTE $0xc3 // movdqa xmm8, xmm3 + LONG $0x380f4466; WORD $0xc415 // blendvpd xmm8, xmm4, xmm0 + LONG $0x646f0ff3; WORD $0x30c7 // movdqu xmm4, oword [rdi + 8*rax + 48] + LONG $0xc46f0f66 // movdqa xmm0, xmm4 + LONG $0x37380f66; BYTE $0xc5 // pcmpgtq xmm0, xmm5 + LONG $0xd46f0f66 // movdqa xmm2, xmm4 + LONG $0x15380f66; BYTE $0xd5 // blendvpd xmm2, xmm5, xmm0 + LONG $0xc7280f66 // movapd xmm0, xmm7 + LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3 + LONG $0x15380f66; BYTE $0xdf // blendvpd xmm3, xmm7, xmm0 + LONG $0xc1280f66 // movapd xmm0, xmm1 + LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4 + LONG $0x15380f66; BYTE $0xe1 // blendvpd xmm4, xmm1, xmm0 + LONG $0x08c08348 // add rax, 8 + LONG $0x280f4466; BYTE $0xcb // movapd xmm9, xmm3 + LONG $0xf4280f66 // movapd xmm6, xmm4 + LONG $0x02c28349 // add r10, 2 + JNE LBB6_9 + LONG $0x01c0f641 // test r8b, 1 + JE LBB6_12 + +LBB6_11: + LONG $0x4c6f0ff3; WORD $0x10c7 // movdqu xmm1, oword [rdi + 8*rax + 16] + LONG $0xc4280f66 // movapd xmm0, xmm4 + LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1 + LONG $0xe96f0f66 // movdqa xmm5, xmm1 + LONG $0x15380f66; BYTE $0xec // blendvpd xmm5, xmm4, xmm0 + LONG $0x246f0ff3; BYTE $0xc7 // movdqu xmm4, oword [rdi + 8*rax] + LONG $0xc3280f66 // movapd xmm0, xmm3 + LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4 + LONG $0xf46f0f66 // movdqa xmm6, xmm4 + LONG $0x15380f66; BYTE $0xf3 // blendvpd xmm6, xmm3, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0x15380f66; BYTE $0xca // blendvpd xmm1, xmm2, xmm0 + LONG 
$0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8
+ LONG $0x380f4166; WORD $0xe015 // blendvpd xmm4, xmm8, xmm0
+ LONG $0x280f4466; BYTE $0xc4 // movapd xmm8, xmm4
+ LONG $0xd1280f66 // movapd xmm2, xmm1
+ LONG $0xde280f66 // movapd xmm3, xmm6
+ LONG $0xe5280f66 // movapd xmm4, xmm5
+
+LBB6_12:
+ LONG $0xc3280f66 // movapd xmm0, xmm3
+ LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4
+ LONG $0x15380f66; BYTE $0xe3 // blendvpd xmm4, xmm3, xmm0
+ LONG $0xcc700f66; BYTE $0x4e // pshufd xmm1, xmm4, 78
+ LONG $0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1
+ LONG $0x15380f66; BYTE $0xcc // blendvpd xmm1, xmm4, xmm0
+ LONG $0x7e0f4866; BYTE $0xce // movq rsi, xmm1
+ LONG $0xc26f0f66 // movdqa xmm0, xmm2
+ LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8
+ LONG $0x380f4166; WORD $0xd015 // blendvpd xmm2, xmm8, xmm0
+ LONG $0xca700f66; BYTE $0x4e // pshufd xmm1, xmm2, 78
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x15380f66; BYTE $0xca // blendvpd xmm1, xmm2, xmm0
+ LONG $0x7e0f4966; BYTE $0xc8 // movq r8, xmm1
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB6_13
+
+LBB6_4:
+ WORD $0x8948; BYTE $0xf0 // mov rax, rsi
+
+LBB6_5:
+ LONG $0xdf348b4a // mov rsi, qword [rdi + 8*r11]
+ WORD $0x3949; BYTE $0xf0 // cmp r8, rsi
+ LONG $0xc64f0f4c // cmovg r8, rsi
+ WORD $0x3948; BYTE $0xf0 // cmp rax, rsi
+ LONG $0xf04d0f48 // cmovge rsi, rax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x8948; BYTE $0xf0 // mov rax, rsi
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB6_5
+
+LBB6_13:
+ WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi
+ WORD $0x894c; BYTE $0x02 // mov qword [rdx], r8
+ RET
+
+LBB6_7:
+ LONG $0x5d280f66; BYTE $0x00 // movapd xmm3, oword 0[rbp] /* [rip + .LCPI6_0] */
+ LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI6_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0x6f0f4166; BYTE $0xd0 // movdqa xmm2, xmm8
+ LONG $0xe3280f66 // movapd xmm4, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB6_11
+ JMP LBB6_12
+
+DATA LCDATA5<>+0x000(SB)/8, $0x8000000000000000
+DATA LCDATA5<>+0x008(SB)/8, $0x8000000000000000
+GLOBL LCDATA5<>(SB), 8, $16
+
+TEXT ·_uint64_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA5<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB7_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x03 // cmp esi, 3
+ JA LBB7_6
+ LONG $0xffc0c749; WORD $0xffff; BYTE $0xff // mov r8, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xc031 // xor eax, eax
+ JMP LBB7_4
+
+LBB7_1:
+ LONG $0xffc0c749; WORD $0xffff; BYTE $0xff // mov r8, -1
+ WORD $0xc031 // xor eax, eax
+ JMP LBB7_13
+
+LBB7_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xfce38341 // and r11d, -4
+ LONG $0xfc438d49 // lea rax, [r11 - 4]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x02e8c149 // shr r8, 2
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB7_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xef0f4566; BYTE $0xc9 // pxor xmm9, xmm9
+ LONG $0x760f4566; BYTE $0xd2 // pcmpeqd xmm10, xmm10
+ WORD $0xc031 // xor eax, eax
+ LONG $0x6f0f4466; WORD $0x0045 // movdqa xmm8, oword 0[rbp] /* [rip + .LCPI7_0] */
+ LONG $0x760f4566; BYTE $0xdb // pcmpeqd xmm11, xmm11
+ LONG $0xef0f4566; BYTE $0xe4 // pxor xmm12, xmm12
+
+LBB7_9:
+ 
LONG $0x6f0f4166; BYTE $0xd2 // movdqa xmm2, xmm10 + LONG $0xef0f4166; BYTE $0xd0 // pxor xmm2, xmm8 + LONG $0x246f0ff3; BYTE $0xc7 // movdqu xmm4, oword [rdi + 8*rax] + LONG $0x6c6f0ff3; WORD $0x10c7 // movdqu xmm5, oword [rdi + 8*rax + 16] + LONG $0x6f0f44f3; WORD $0xc76c; BYTE $0x20 // movdqu xmm13, oword [rdi + 8*rax + 32] + LONG $0xc46f0f66 // movdqa xmm0, xmm4 + LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8 + LONG $0x6f0f4166; BYTE $0xc9 // movdqa xmm1, xmm9 + LONG $0xef0f4166; BYTE $0xc8 // pxor xmm1, xmm8 + LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0xdc6f0f66 // movdqa xmm3, xmm4 + LONG $0x380f4166; WORD $0xda15 // blendvpd xmm3, xmm10, xmm0 + LONG $0x746f0ff3; WORD $0x30c7 // movdqu xmm6, oword [rdi + 8*rax + 48] + LONG $0x6f0f4166; BYTE $0xfb // movdqa xmm7, xmm11 + LONG $0xef0f4166; BYTE $0xf8 // pxor xmm7, xmm8 + LONG $0xc56f0f66 // movdqa xmm0, xmm5 + LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8 + LONG $0x6f0f4166; BYTE $0xd4 // movdqa xmm2, xmm12 + LONG $0xef0f4166; BYTE $0xd0 // pxor xmm2, xmm8 + LONG $0x37380f66; BYTE $0xd0 // pcmpgtq xmm2, xmm0 + LONG $0x37380f66; BYTE $0xc7 // pcmpgtq xmm0, xmm7 + LONG $0xfd6f0f66 // movdqa xmm7, xmm5 + LONG $0x380f4166; WORD $0xfb15 // blendvpd xmm7, xmm11, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4166; WORD $0xe115 // blendvpd xmm4, xmm9, xmm0 + LONG $0xc26f0f66 // movdqa xmm0, xmm2 + LONG $0x380f4166; WORD $0xec15 // blendvpd xmm5, xmm12, xmm0 + LONG $0xd3280f66 // movapd xmm2, xmm3 + LONG $0x570f4166; BYTE $0xd0 // xorpd xmm2, xmm8 + LONG $0x6f0f4166; BYTE $0xc5 // movdqa xmm0, xmm13 + LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8 + LONG $0xcc280f66 // movapd xmm1, xmm4 + LONG $0x570f4166; BYTE $0xc8 // xorpd xmm1, xmm8 + LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0x6f0f4566; BYTE $0xd5 // movdqa xmm10, xmm13 + LONG $0x380f4466; WORD $0xd315 // blendvpd xmm10, xmm3, xmm0 + LONG $0xdf280f66 // movapd xmm3, xmm7 + LONG $0x570f4166; BYTE $0xd8 // xorpd xmm3, xmm8 + LONG $0xc66f0f66 // movdqa xmm0, xmm6 + LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8 + LONG $0xd5280f66 // movapd xmm2, xmm5 + LONG $0x570f4166; BYTE $0xd0 // xorpd xmm2, xmm8 + LONG $0x37380f66; BYTE $0xd0 // pcmpgtq xmm2, xmm0 + LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3 + LONG $0x6f0f4466; BYTE $0xde // movdqa xmm11, xmm6 + LONG $0x380f4466; WORD $0xdf15 // blendvpd xmm11, xmm7, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4466; WORD $0xec15 // blendvpd xmm13, xmm4, xmm0 + LONG $0xc26f0f66 // movdqa xmm0, xmm2 + LONG $0x15380f66; BYTE $0xf5 // blendvpd xmm6, xmm5, xmm0 + LONG $0x08c08348 // add rax, 8 + LONG $0x280f4566; BYTE $0xcd // movapd xmm9, xmm13 + LONG $0x280f4466; BYTE $0xe6 // movapd xmm12, xmm6 + LONG $0x02c28349 // add r10, 2 + JNE LBB7_9 + LONG $0x01c0f641 // test r8b, 1 + JE LBB7_12 + +LBB7_11: + LONG $0x24100f66; BYTE $0xc7 // movupd xmm4, oword [rdi + 8*rax] + LONG $0x5c100f66; WORD $0x10c7 // movupd xmm3, oword [rdi + 8*rax + 16] + LONG $0x6d280f66; BYTE $0x00 // movapd xmm5, oword 0[rbp] /* [rip + .LCPI7_0] */ + LONG $0xc6280f66 // movapd xmm0, xmm6 + LONG $0xc5570f66 // xorpd xmm0, xmm5 + LONG $0xcb280f66 // movapd xmm1, xmm3 + LONG $0xcd570f66 // xorpd xmm1, xmm5 + LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1 + LONG $0xfb280f66 // movapd xmm7, xmm3 + LONG $0x15380f66; BYTE $0xfe // blendvpd xmm7, xmm6, xmm0 + LONG $0x280f4166; BYTE $0xc5 // movapd xmm0, xmm13 + 
LONG $0xc5570f66 // xorpd xmm0, xmm5 + LONG $0xd4280f66 // movapd xmm2, xmm4 + LONG $0xd5570f66 // xorpd xmm2, xmm5 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0xf4280f66 // movapd xmm6, xmm4 + LONG $0x380f4166; WORD $0xf515 // blendvpd xmm6, xmm13, xmm0 + LONG $0x280f4166; BYTE $0xc3 // movapd xmm0, xmm11 + LONG $0xc5570f66 // xorpd xmm0, xmm5 + LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4166; WORD $0xdb15 // blendvpd xmm3, xmm11, xmm0 + LONG $0x570f4166; BYTE $0xea // xorpd xmm5, xmm10 + LONG $0x37380f66; BYTE $0xd5 // pcmpgtq xmm2, xmm5 + LONG $0xc26f0f66 // movdqa xmm0, xmm2 + LONG $0x380f4166; WORD $0xe215 // blendvpd xmm4, xmm10, xmm0 + LONG $0x280f4466; BYTE $0xd4 // movapd xmm10, xmm4 + LONG $0x280f4466; BYTE $0xdb // movapd xmm11, xmm3 + LONG $0x280f4466; BYTE $0xee // movapd xmm13, xmm6 + LONG $0xf7280f66 // movapd xmm6, xmm7 + +LBB7_12: + LONG $0x4d280f66; BYTE $0x00 // movapd xmm1, oword 0[rbp] /* [rip + .LCPI7_0] */ + LONG $0xd6280f66 // movapd xmm2, xmm6 + LONG $0xd1570f66 // xorpd xmm2, xmm1 + LONG $0x280f4166; BYTE $0xc5 // movapd xmm0, xmm13 + LONG $0xc1570f66 // xorpd xmm0, xmm1 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0x380f4166; WORD $0xf515 // blendvpd xmm6, xmm13, xmm0 + LONG $0xd6700f66; BYTE $0x4e // pshufd xmm2, xmm6, 78 + LONG $0xc6280f66 // movapd xmm0, xmm6 + LONG $0xc1570f66 // xorpd xmm0, xmm1 + LONG $0xda6f0f66 // movdqa xmm3, xmm2 + LONG $0xd9ef0f66 // pxor xmm3, xmm1 + LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3 + LONG $0x15380f66; BYTE $0xd6 // blendvpd xmm2, xmm6, xmm0 + LONG $0x7e0f4866; BYTE $0xd0 // movq rax, xmm2 + LONG $0x6f0f4166; BYTE $0xd2 // movdqa xmm2, xmm10 + LONG $0xd1ef0f66 // pxor xmm2, xmm1 + LONG $0x6f0f4166; BYTE $0xc3 // movdqa xmm0, xmm11 + LONG $0xc1ef0f66 // pxor xmm0, xmm1 + LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2 + LONG $0x380f4566; WORD $0xda15 // blendvpd xmm11, xmm10, xmm0 + LONG $0x700f4166; WORD $0x4ed3 // pshufd xmm2, xmm11, 78 + LONG $0x6f0f4166; BYTE $0xc3 // movdqa xmm0, xmm11 + LONG $0xc1ef0f66 // pxor xmm0, xmm1 + LONG $0xcaef0f66 // pxor xmm1, xmm2 + LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4166; WORD $0xd315 // blendvpd xmm2, xmm11, xmm0 + LONG $0x7e0f4966; BYTE $0xd0 // movq r8, xmm2 + WORD $0x394d; BYTE $0xcb // cmp r11, r9 + JE LBB7_13 + +LBB7_4: + WORD $0x8948; BYTE $0xc6 // mov rsi, rax + +LBB7_5: + LONG $0xdf048b4a // mov rax, qword [rdi + 8*r11] + WORD $0x3949; BYTE $0xc0 // cmp r8, rax + LONG $0xc0430f4c // cmovae r8, rax + WORD $0x3948; BYTE $0xc6 // cmp rsi, rax + LONG $0xc6470f48 // cmova rax, rsi + LONG $0x01c38349 // add r11, 1 + WORD $0x8948; BYTE $0xc6 // mov rsi, rax + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 + JNE LBB7_5 + +LBB7_13: + WORD $0x8948; BYTE $0x01 // mov qword [rcx], rax + WORD $0x894c; BYTE $0x02 // mov qword [rdx], r8 + RET + +LBB7_7: + LONG $0x570f4566; BYTE $0xed // xorpd xmm13, xmm13 + LONG $0x760f4566; BYTE $0xd2 // pcmpeqd xmm10, xmm10 + WORD $0xc031 // xor eax, eax + LONG $0x760f4566; BYTE $0xdb // pcmpeqd xmm11, xmm11 + LONG $0xf6570f66 // xorpd xmm6, xmm6 + LONG $0x01c0f641 // test r8b, 1 + JNE LBB7_11 + JMP LBB7_12 diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go new file mode 100644 index 00000000..1666df12 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go 
@@ -0,0 +1,407 @@ +// Code generated by transpose_ints.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +// when we upgrade to support go1.18, this can be massively simplified by using +// Go Generics, but since we aren't supporting go1.18 yet, I didn't want to use +// them here so we can maintain the backwards compatibility. + +func transposeInt8Int8(src []int8, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeInt8Uint8(src []int8, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeInt8Int16(src []int8, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeInt8Uint16(src []int8, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeInt8Int32(src []int8, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeInt8Uint32(src []int8, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeInt8Int64(src []int8, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeInt8Uint64(src []int8, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeUint8Int8(src []uint8, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeUint8Uint8(src []uint8, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeUint8Int16(src []uint8, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeUint8Uint16(src []uint8, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeUint8Int32(src []uint8, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeUint8Uint32(src []uint8, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeUint8Int64(src []uint8, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeUint8Uint64(src []uint8, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeInt16Int8(src []int16, dest []int8, transposeMap []int32) { 
+ for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeInt16Uint8(src []int16, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeInt16Int16(src []int16, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeInt16Uint16(src []int16, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeInt16Int32(src []int16, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeInt16Uint32(src []int16, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeInt16Int64(src []int16, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeInt16Uint64(src []int16, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeUint16Int8(src []uint16, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeUint16Uint8(src []uint16, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeUint16Int16(src []uint16, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeUint16Uint16(src []uint16, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeUint16Int32(src []uint16, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeUint16Uint32(src []uint16, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeUint16Int64(src []uint16, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeUint16Uint64(src []uint16, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeInt32Int8(src []int32, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeInt32Uint8(src []int32, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeInt32Int16(src []int32, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeInt32Uint16(src []int32, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeInt32Int32(src []int32, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeInt32Uint32(src []int32, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeInt32Int64(src []int32, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeInt32Uint64(src []int32, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeUint32Int8(src []uint32, dest []int8, transposeMap 
[]int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeUint32Uint8(src []uint32, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeUint32Int16(src []uint32, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeUint32Uint16(src []uint32, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeUint32Int32(src []uint32, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeUint32Uint32(src []uint32, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeUint32Int64(src []uint32, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeUint32Uint64(src []uint32, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeInt64Int8(src []int64, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeInt64Uint8(src []int64, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeInt64Int16(src []int64, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeInt64Uint16(src []int64, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeInt64Int32(src []int64, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeInt64Uint32(src []int64, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeInt64Int64(src []int64, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeInt64Uint64(src []int64, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} + +func transposeUint64Int8(src []uint64, dest []int8, transposeMap []int32) { + for i, s := range src { + dest[i] = int8(transposeMap[s]) + } +} + +func transposeUint64Uint8(src []uint64, dest []uint8, transposeMap []int32) { + for i, s := range src { + dest[i] = uint8(transposeMap[s]) + } +} + +func transposeUint64Int16(src []uint64, dest []int16, transposeMap []int32) { + for i, s := range src { + dest[i] = int16(transposeMap[s]) + } +} + +func transposeUint64Uint16(src []uint64, dest []uint16, transposeMap []int32) { + for i, s := range src { + dest[i] = uint16(transposeMap[s]) + } +} + +func transposeUint64Int32(src []uint64, dest []int32, transposeMap []int32) { + for i, s := range src { + dest[i] = int32(transposeMap[s]) + } +} + +func transposeUint64Uint32(src []uint64, dest []uint32, transposeMap []int32) { + for i, s := range src { + dest[i] = uint32(transposeMap[s]) + } +} + +func transposeUint64Int64(src []uint64, dest []int64, transposeMap []int32) { + for i, s := range src { + dest[i] = int64(transposeMap[s]) + } +} + +func transposeUint64Uint64(src []uint64, dest []uint64, transposeMap []int32) { + for i, s := range src { + dest[i] = uint64(transposeMap[s]) + } +} diff --git 
a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go.tmpl new file mode 100644 index 00000000..680ae1ee --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go.tmpl @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type }} +{{ $srcName := .Name }} +{{ range $typelist }} +{{ $dest := .Type }} +{{ $destName := .Name }} + +func transpose{{ $srcName }}{{ $destName }}(src []{{$src}}, dest []{{$dest}}, transposeMap []int32) { + for i, s := range src { + dest[i] = {{ $dest }}(transposeMap[s]) + } +} + +{{ end }} +{{ end }} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.tmpldata b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.tmpldata new file mode 100644 index 00000000..72eaf300 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.tmpldata @@ -0,0 +1,34 @@ +[ + { + "Name": "Int8", + "Type": "int8" + }, + { + "Name": "Uint8", + "Type": "uint8" + }, + { + "Name": "Int16", + "Type": "int16" + }, + { + "Name": "Uint16", + "Type": "uint16" + }, + { + "Name": "Int32", + "Type": "int32" + }, + { + "Name": "Uint32", + "Type": "uint32" + }, + { + "Name": "Int64", + "Type": "int64" + }, + { + "Name": "Uint64", + "Type": "uint64" + } +] diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go new file mode 100644 index 00000000..d4433d36 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go @@ -0,0 +1,325 @@ +// Code generated by transpose_ints_amd64.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !noasm + +package utils + +import ( + "golang.org/x/sys/cpu" +) + +var ( + TransposeInt8Int8 func([]int8, []int8, []int32) + TransposeInt8Uint8 func([]int8, []uint8, []int32) + TransposeInt8Int16 func([]int8, []int16, []int32) + TransposeInt8Uint16 func([]int8, []uint16, []int32) + TransposeInt8Int32 func([]int8, []int32, []int32) + TransposeInt8Uint32 func([]int8, []uint32, []int32) + TransposeInt8Int64 func([]int8, []int64, []int32) + TransposeInt8Uint64 func([]int8, []uint64, []int32) + + TransposeUint8Int8 func([]uint8, []int8, []int32) + TransposeUint8Uint8 func([]uint8, []uint8, []int32) + TransposeUint8Int16 func([]uint8, []int16, []int32) + TransposeUint8Uint16 func([]uint8, []uint16, []int32) + TransposeUint8Int32 func([]uint8, []int32, []int32) + TransposeUint8Uint32 func([]uint8, []uint32, []int32) + TransposeUint8Int64 func([]uint8, []int64, []int32) + TransposeUint8Uint64 func([]uint8, []uint64, []int32) + + TransposeInt16Int8 func([]int16, []int8, []int32) + TransposeInt16Uint8 func([]int16, []uint8, []int32) + TransposeInt16Int16 func([]int16, []int16, []int32) + TransposeInt16Uint16 func([]int16, []uint16, []int32) + TransposeInt16Int32 func([]int16, []int32, []int32) + TransposeInt16Uint32 func([]int16, []uint32, []int32) + TransposeInt16Int64 func([]int16, []int64, []int32) + TransposeInt16Uint64 func([]int16, []uint64, []int32) + + TransposeUint16Int8 func([]uint16, []int8, []int32) + TransposeUint16Uint8 func([]uint16, []uint8, []int32) + TransposeUint16Int16 func([]uint16, []int16, []int32) + TransposeUint16Uint16 func([]uint16, []uint16, []int32) + TransposeUint16Int32 func([]uint16, []int32, []int32) + TransposeUint16Uint32 func([]uint16, []uint32, []int32) + TransposeUint16Int64 func([]uint16, []int64, []int32) + TransposeUint16Uint64 func([]uint16, []uint64, []int32) + + TransposeInt32Int8 func([]int32, []int8, []int32) + TransposeInt32Uint8 func([]int32, []uint8, []int32) + TransposeInt32Int16 func([]int32, []int16, []int32) + TransposeInt32Uint16 func([]int32, []uint16, []int32) + TransposeInt32Int32 func([]int32, []int32, []int32) + TransposeInt32Uint32 func([]int32, []uint32, []int32) + TransposeInt32Int64 func([]int32, []int64, []int32) + TransposeInt32Uint64 func([]int32, []uint64, []int32) + + TransposeUint32Int8 func([]uint32, []int8, []int32) + TransposeUint32Uint8 func([]uint32, []uint8, []int32) + TransposeUint32Int16 func([]uint32, []int16, []int32) + TransposeUint32Uint16 func([]uint32, []uint16, []int32) + TransposeUint32Int32 func([]uint32, []int32, []int32) + TransposeUint32Uint32 func([]uint32, []uint32, []int32) + TransposeUint32Int64 func([]uint32, []int64, []int32) + TransposeUint32Uint64 func([]uint32, []uint64, []int32) + + TransposeInt64Int8 func([]int64, []int8, []int32) + TransposeInt64Uint8 func([]int64, []uint8, []int32) + TransposeInt64Int16 func([]int64, []int16, []int32) + TransposeInt64Uint16 func([]int64, []uint16, []int32) + TransposeInt64Int32 func([]int64, []int32, []int32) + TransposeInt64Uint32 func([]int64, []uint32, []int32) + TransposeInt64Int64 func([]int64, []int64, []int32) + TransposeInt64Uint64 func([]int64, []uint64, []int32) + + TransposeUint64Int8 func([]uint64, []int8, []int32) + TransposeUint64Uint8 func([]uint64, []uint8, []int32) + TransposeUint64Int16 func([]uint64, []int16, []int32) + TransposeUint64Uint16 func([]uint64, []uint16, []int32) + TransposeUint64Int32 func([]uint64, []int32, []int32) + TransposeUint64Uint32 func([]uint64, []uint32, []int32) + TransposeUint64Int64 func([]uint64, []int64, 
[]int32) + TransposeUint64Uint64 func([]uint64, []uint64, []int32) +) + +func init() { + if cpu.X86.HasAVX2 { + + TransposeInt8Int8 = transposeInt8Int8avx2 + TransposeInt8Uint8 = transposeInt8Uint8avx2 + TransposeInt8Int16 = transposeInt8Int16avx2 + TransposeInt8Uint16 = transposeInt8Uint16avx2 + TransposeInt8Int32 = transposeInt8Int32avx2 + TransposeInt8Uint32 = transposeInt8Uint32avx2 + TransposeInt8Int64 = transposeInt8Int64avx2 + TransposeInt8Uint64 = transposeInt8Uint64avx2 + + TransposeUint8Int8 = transposeUint8Int8avx2 + TransposeUint8Uint8 = transposeUint8Uint8avx2 + TransposeUint8Int16 = transposeUint8Int16avx2 + TransposeUint8Uint16 = transposeUint8Uint16avx2 + TransposeUint8Int32 = transposeUint8Int32avx2 + TransposeUint8Uint32 = transposeUint8Uint32avx2 + TransposeUint8Int64 = transposeUint8Int64avx2 + TransposeUint8Uint64 = transposeUint8Uint64avx2 + + TransposeInt16Int8 = transposeInt16Int8avx2 + TransposeInt16Uint8 = transposeInt16Uint8avx2 + TransposeInt16Int16 = transposeInt16Int16avx2 + TransposeInt16Uint16 = transposeInt16Uint16avx2 + TransposeInt16Int32 = transposeInt16Int32avx2 + TransposeInt16Uint32 = transposeInt16Uint32avx2 + TransposeInt16Int64 = transposeInt16Int64avx2 + TransposeInt16Uint64 = transposeInt16Uint64avx2 + + TransposeUint16Int8 = transposeUint16Int8avx2 + TransposeUint16Uint8 = transposeUint16Uint8avx2 + TransposeUint16Int16 = transposeUint16Int16avx2 + TransposeUint16Uint16 = transposeUint16Uint16avx2 + TransposeUint16Int32 = transposeUint16Int32avx2 + TransposeUint16Uint32 = transposeUint16Uint32avx2 + TransposeUint16Int64 = transposeUint16Int64avx2 + TransposeUint16Uint64 = transposeUint16Uint64avx2 + + TransposeInt32Int8 = transposeInt32Int8avx2 + TransposeInt32Uint8 = transposeInt32Uint8avx2 + TransposeInt32Int16 = transposeInt32Int16avx2 + TransposeInt32Uint16 = transposeInt32Uint16avx2 + TransposeInt32Int32 = transposeInt32Int32avx2 + TransposeInt32Uint32 = transposeInt32Uint32avx2 + TransposeInt32Int64 = transposeInt32Int64avx2 + TransposeInt32Uint64 = transposeInt32Uint64avx2 + + TransposeUint32Int8 = transposeUint32Int8avx2 + TransposeUint32Uint8 = transposeUint32Uint8avx2 + TransposeUint32Int16 = transposeUint32Int16avx2 + TransposeUint32Uint16 = transposeUint32Uint16avx2 + TransposeUint32Int32 = transposeUint32Int32avx2 + TransposeUint32Uint32 = transposeUint32Uint32avx2 + TransposeUint32Int64 = transposeUint32Int64avx2 + TransposeUint32Uint64 = transposeUint32Uint64avx2 + + TransposeInt64Int8 = transposeInt64Int8avx2 + TransposeInt64Uint8 = transposeInt64Uint8avx2 + TransposeInt64Int16 = transposeInt64Int16avx2 + TransposeInt64Uint16 = transposeInt64Uint16avx2 + TransposeInt64Int32 = transposeInt64Int32avx2 + TransposeInt64Uint32 = transposeInt64Uint32avx2 + TransposeInt64Int64 = transposeInt64Int64avx2 + TransposeInt64Uint64 = transposeInt64Uint64avx2 + + TransposeUint64Int8 = transposeUint64Int8avx2 + TransposeUint64Uint8 = transposeUint64Uint8avx2 + TransposeUint64Int16 = transposeUint64Int16avx2 + TransposeUint64Uint16 = transposeUint64Uint16avx2 + TransposeUint64Int32 = transposeUint64Int32avx2 + TransposeUint64Uint32 = transposeUint64Uint32avx2 + TransposeUint64Int64 = transposeUint64Int64avx2 + TransposeUint64Uint64 = transposeUint64Uint64avx2 + + } else if cpu.X86.HasSSE42 { + + TransposeInt8Int8 = transposeInt8Int8sse4 + TransposeInt8Uint8 = transposeInt8Uint8sse4 + TransposeInt8Int16 = transposeInt8Int16sse4 + TransposeInt8Uint16 = transposeInt8Uint16sse4 + TransposeInt8Int32 = transposeInt8Int32sse4 + TransposeInt8Uint32 = 
transposeInt8Uint32sse4 + TransposeInt8Int64 = transposeInt8Int64sse4 + TransposeInt8Uint64 = transposeInt8Uint64sse4 + + TransposeUint8Int8 = transposeUint8Int8sse4 + TransposeUint8Uint8 = transposeUint8Uint8sse4 + TransposeUint8Int16 = transposeUint8Int16sse4 + TransposeUint8Uint16 = transposeUint8Uint16sse4 + TransposeUint8Int32 = transposeUint8Int32sse4 + TransposeUint8Uint32 = transposeUint8Uint32sse4 + TransposeUint8Int64 = transposeUint8Int64sse4 + TransposeUint8Uint64 = transposeUint8Uint64sse4 + + TransposeInt16Int8 = transposeInt16Int8sse4 + TransposeInt16Uint8 = transposeInt16Uint8sse4 + TransposeInt16Int16 = transposeInt16Int16sse4 + TransposeInt16Uint16 = transposeInt16Uint16sse4 + TransposeInt16Int32 = transposeInt16Int32sse4 + TransposeInt16Uint32 = transposeInt16Uint32sse4 + TransposeInt16Int64 = transposeInt16Int64sse4 + TransposeInt16Uint64 = transposeInt16Uint64sse4 + + TransposeUint16Int8 = transposeUint16Int8sse4 + TransposeUint16Uint8 = transposeUint16Uint8sse4 + TransposeUint16Int16 = transposeUint16Int16sse4 + TransposeUint16Uint16 = transposeUint16Uint16sse4 + TransposeUint16Int32 = transposeUint16Int32sse4 + TransposeUint16Uint32 = transposeUint16Uint32sse4 + TransposeUint16Int64 = transposeUint16Int64sse4 + TransposeUint16Uint64 = transposeUint16Uint64sse4 + + TransposeInt32Int8 = transposeInt32Int8sse4 + TransposeInt32Uint8 = transposeInt32Uint8sse4 + TransposeInt32Int16 = transposeInt32Int16sse4 + TransposeInt32Uint16 = transposeInt32Uint16sse4 + TransposeInt32Int32 = transposeInt32Int32sse4 + TransposeInt32Uint32 = transposeInt32Uint32sse4 + TransposeInt32Int64 = transposeInt32Int64sse4 + TransposeInt32Uint64 = transposeInt32Uint64sse4 + + TransposeUint32Int8 = transposeUint32Int8sse4 + TransposeUint32Uint8 = transposeUint32Uint8sse4 + TransposeUint32Int16 = transposeUint32Int16sse4 + TransposeUint32Uint16 = transposeUint32Uint16sse4 + TransposeUint32Int32 = transposeUint32Int32sse4 + TransposeUint32Uint32 = transposeUint32Uint32sse4 + TransposeUint32Int64 = transposeUint32Int64sse4 + TransposeUint32Uint64 = transposeUint32Uint64sse4 + + TransposeInt64Int8 = transposeInt64Int8sse4 + TransposeInt64Uint8 = transposeInt64Uint8sse4 + TransposeInt64Int16 = transposeInt64Int16sse4 + TransposeInt64Uint16 = transposeInt64Uint16sse4 + TransposeInt64Int32 = transposeInt64Int32sse4 + TransposeInt64Uint32 = transposeInt64Uint32sse4 + TransposeInt64Int64 = transposeInt64Int64sse4 + TransposeInt64Uint64 = transposeInt64Uint64sse4 + + TransposeUint64Int8 = transposeUint64Int8sse4 + TransposeUint64Uint8 = transposeUint64Uint8sse4 + TransposeUint64Int16 = transposeUint64Int16sse4 + TransposeUint64Uint16 = transposeUint64Uint16sse4 + TransposeUint64Int32 = transposeUint64Int32sse4 + TransposeUint64Uint32 = transposeUint64Uint32sse4 + TransposeUint64Int64 = transposeUint64Int64sse4 + TransposeUint64Uint64 = transposeUint64Uint64sse4 + + } else { + + TransposeInt8Int8 = transposeInt8Int8 + TransposeInt8Uint8 = transposeInt8Uint8 + TransposeInt8Int16 = transposeInt8Int16 + TransposeInt8Uint16 = transposeInt8Uint16 + TransposeInt8Int32 = transposeInt8Int32 + TransposeInt8Uint32 = transposeInt8Uint32 + TransposeInt8Int64 = transposeInt8Int64 + TransposeInt8Uint64 = transposeInt8Uint64 + + TransposeUint8Int8 = transposeUint8Int8 + TransposeUint8Uint8 = transposeUint8Uint8 + TransposeUint8Int16 = transposeUint8Int16 + TransposeUint8Uint16 = transposeUint8Uint16 + TransposeUint8Int32 = transposeUint8Int32 + TransposeUint8Uint32 = transposeUint8Uint32 + TransposeUint8Int64 = 
transposeUint8Int64 + TransposeUint8Uint64 = transposeUint8Uint64 + + TransposeInt16Int8 = transposeInt16Int8 + TransposeInt16Uint8 = transposeInt16Uint8 + TransposeInt16Int16 = transposeInt16Int16 + TransposeInt16Uint16 = transposeInt16Uint16 + TransposeInt16Int32 = transposeInt16Int32 + TransposeInt16Uint32 = transposeInt16Uint32 + TransposeInt16Int64 = transposeInt16Int64 + TransposeInt16Uint64 = transposeInt16Uint64 + + TransposeUint16Int8 = transposeUint16Int8 + TransposeUint16Uint8 = transposeUint16Uint8 + TransposeUint16Int16 = transposeUint16Int16 + TransposeUint16Uint16 = transposeUint16Uint16 + TransposeUint16Int32 = transposeUint16Int32 + TransposeUint16Uint32 = transposeUint16Uint32 + TransposeUint16Int64 = transposeUint16Int64 + TransposeUint16Uint64 = transposeUint16Uint64 + + TransposeInt32Int8 = transposeInt32Int8 + TransposeInt32Uint8 = transposeInt32Uint8 + TransposeInt32Int16 = transposeInt32Int16 + TransposeInt32Uint16 = transposeInt32Uint16 + TransposeInt32Int32 = transposeInt32Int32 + TransposeInt32Uint32 = transposeInt32Uint32 + TransposeInt32Int64 = transposeInt32Int64 + TransposeInt32Uint64 = transposeInt32Uint64 + + TransposeUint32Int8 = transposeUint32Int8 + TransposeUint32Uint8 = transposeUint32Uint8 + TransposeUint32Int16 = transposeUint32Int16 + TransposeUint32Uint16 = transposeUint32Uint16 + TransposeUint32Int32 = transposeUint32Int32 + TransposeUint32Uint32 = transposeUint32Uint32 + TransposeUint32Int64 = transposeUint32Int64 + TransposeUint32Uint64 = transposeUint32Uint64 + + TransposeInt64Int8 = transposeInt64Int8 + TransposeInt64Uint8 = transposeInt64Uint8 + TransposeInt64Int16 = transposeInt64Int16 + TransposeInt64Uint16 = transposeInt64Uint16 + TransposeInt64Int32 = transposeInt64Int32 + TransposeInt64Uint32 = transposeInt64Uint32 + TransposeInt64Int64 = transposeInt64Int64 + TransposeInt64Uint64 = transposeInt64Uint64 + + TransposeUint64Int8 = transposeUint64Int8 + TransposeUint64Uint8 = transposeUint64Uint8 + TransposeUint64Int16 = transposeUint64Int16 + TransposeUint64Uint16 = transposeUint64Uint16 + TransposeUint64Int32 = transposeUint64Int32 + TransposeUint64Uint32 = transposeUint64Uint32 + TransposeUint64Int64 = transposeUint64Int64 + TransposeUint64Uint64 = transposeUint64Uint64 + + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go.tmpl new file mode 100644 index 00000000..eac0208e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go.tmpl @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
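The `init()` in the amd64 file above (generated from the template that follows) picks an implementation once per process: AVX2 if the CPU supports it, else SSE4.2, else the portable Go loops. Callers always go through the exported function variables, so the choice is invisible at call sites. A self-contained sketch of that dispatch pattern, with hypothetical `sumAVX2`/`sumGeneric` stand-ins for the real kernels:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Sum is the exported entry point; init() binds it to the best
// implementation available on this machine.
var Sum func([]int32) int64

func sumGeneric(xs []int32) int64 { // portable fallback
	var t int64
	for _, x := range xs {
		t += int64(x)
	}
	return t
}

// A real port would back this with assembly; it reuses the generic
// loop here so the sketch stays runnable on any architecture.
func sumAVX2(xs []int32) int64 { return sumGeneric(xs) }

func init() {
	if cpu.X86.HasAVX2 {
		Sum = sumAVX2
	} else {
		Sum = sumGeneric
	}
}

func main() { fmt.Println(Sum([]int32{1, 2, 3})) } // 6
```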
+ +//go:build !noasm +// +build !noasm + +package utils + +import ( + "golang.org/x/sys/cpu" +) + +var ( +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} func([]{{$src}}, []{{$dest}}, []int32) +{{end}} +{{end}} +) + + +func init() { + if cpu.X86.HasAVX2 { +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }}avx2 +{{end}} +{{end}} + } else if cpu.X86.HasSSE42 { +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }}sse4 +{{end}} +{{end}} + } else { +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }} +{{end}} +{{end}} + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_arm64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_arm64.go new file mode 100644 index 00000000..cc957cda --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_arm64.go @@ -0,0 +1,96 @@ +// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
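No SIMD port is vendored for arm64, so the file that follows binds the exported variables straight to the portable Go loops; behaviour matches amd64, just without the assembly fast path. A sketch of that direct binding (illustrative names, not the vendored API):

```go
package main

import "fmt"

func transposeGeneric(src, dest, transposeMap []int32) {
	for i, s := range src {
		dest[i] = transposeMap[s]
	}
}

// Exported alias; callers cannot tell whether an accelerated
// implementation was substituted at build or init time.
var TransposeInt32Int32 = transposeGeneric

func main() {
	dest := make([]int32, 3)
	TransposeInt32Int32([]int32{0, 2, 1}, dest, []int32{10, 20, 30})
	fmt.Println(dest) // [10 30 20]
}
```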
+ +//go:build !noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( + TransposeInt8Int8 = transposeInt8Int8 + TransposeInt8Uint8 = transposeInt8Uint8 + TransposeInt8Int16 = transposeInt8Int16 + TransposeInt8Uint16 = transposeInt8Uint16 + TransposeInt8Int32 = transposeInt8Int32 + TransposeInt8Uint32 = transposeInt8Uint32 + TransposeInt8Int64 = transposeInt8Int64 + TransposeInt8Uint64 = transposeInt8Uint64 + + TransposeUint8Int8 = transposeUint8Int8 + TransposeUint8Uint8 = transposeUint8Uint8 + TransposeUint8Int16 = transposeUint8Int16 + TransposeUint8Uint16 = transposeUint8Uint16 + TransposeUint8Int32 = transposeUint8Int32 + TransposeUint8Uint32 = transposeUint8Uint32 + TransposeUint8Int64 = transposeUint8Int64 + TransposeUint8Uint64 = transposeUint8Uint64 + + TransposeInt16Int8 = transposeInt16Int8 + TransposeInt16Uint8 = transposeInt16Uint8 + TransposeInt16Int16 = transposeInt16Int16 + TransposeInt16Uint16 = transposeInt16Uint16 + TransposeInt16Int32 = transposeInt16Int32 + TransposeInt16Uint32 = transposeInt16Uint32 + TransposeInt16Int64 = transposeInt16Int64 + TransposeInt16Uint64 = transposeInt16Uint64 + + TransposeUint16Int8 = transposeUint16Int8 + TransposeUint16Uint8 = transposeUint16Uint8 + TransposeUint16Int16 = transposeUint16Int16 + TransposeUint16Uint16 = transposeUint16Uint16 + TransposeUint16Int32 = transposeUint16Int32 + TransposeUint16Uint32 = transposeUint16Uint32 + TransposeUint16Int64 = transposeUint16Int64 + TransposeUint16Uint64 = transposeUint16Uint64 + + TransposeInt32Int8 = transposeInt32Int8 + TransposeInt32Uint8 = transposeInt32Uint8 + TransposeInt32Int16 = transposeInt32Int16 + TransposeInt32Uint16 = transposeInt32Uint16 + TransposeInt32Int32 = transposeInt32Int32 + TransposeInt32Uint32 = transposeInt32Uint32 + TransposeInt32Int64 = transposeInt32Int64 + TransposeInt32Uint64 = transposeInt32Uint64 + + TransposeUint32Int8 = transposeUint32Int8 + TransposeUint32Uint8 = transposeUint32Uint8 + TransposeUint32Int16 = transposeUint32Int16 + TransposeUint32Uint16 = transposeUint32Uint16 + TransposeUint32Int32 = transposeUint32Int32 + TransposeUint32Uint32 = transposeUint32Uint32 + TransposeUint32Int64 = transposeUint32Int64 + TransposeUint32Uint64 = transposeUint32Uint64 + + TransposeInt64Int8 = transposeInt64Int8 + TransposeInt64Uint8 = transposeInt64Uint8 + TransposeInt64Int16 = transposeInt64Int16 + TransposeInt64Uint16 = transposeInt64Uint16 + TransposeInt64Int32 = transposeInt64Int32 + TransposeInt64Uint32 = transposeInt64Uint32 + TransposeInt64Int64 = transposeInt64Int64 + TransposeInt64Uint64 = transposeInt64Uint64 + + TransposeUint64Int8 = transposeUint64Int8 + TransposeUint64Uint8 = transposeUint64Uint8 + TransposeUint64Int16 = transposeUint64Int16 + TransposeUint64Uint16 = transposeUint64Uint16 + TransposeUint64Int32 = transposeUint64Int32 + TransposeUint64Uint32 = transposeUint64Uint32 + TransposeUint64Int64 = transposeUint64Int64 + TransposeUint64Uint64 = transposeUint64Uint64 +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.go new file mode 100644 index 00000000..f1421ddf --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.go @@ -0,0 +1,473 @@ +// Code generated by transpose_ints_simd.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +import ( + "unsafe" +) + +//go:noescape +func _transpose_int8_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int8avx2(src []int8, dest []int8, transposeMap []int32) { + _transpose_int8_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint8avx2(src []int8, dest []uint8, transposeMap []int32) { + _transpose_int8_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int16avx2(src []int8, dest []int16, transposeMap []int32) { + _transpose_int8_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint16avx2(src []int8, dest []uint16, transposeMap []int32) { + _transpose_int8_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int32avx2(src []int8, dest []int32, transposeMap []int32) { + _transpose_int8_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint32avx2(src []int8, dest []uint32, transposeMap []int32) { + _transpose_int8_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int64avx2(src []int8, dest []int64, transposeMap []int32) { + _transpose_int8_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint64avx2(src []int8, dest []uint64, transposeMap []int32) { + _transpose_int8_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int8avx2(src []uint8, dest []int8, 
transposeMap []int32) { + _transpose_uint8_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint8avx2(src []uint8, dest []uint8, transposeMap []int32) { + _transpose_uint8_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int16avx2(src []uint8, dest []int16, transposeMap []int32) { + _transpose_uint8_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint16avx2(src []uint8, dest []uint16, transposeMap []int32) { + _transpose_uint8_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int32avx2(src []uint8, dest []int32, transposeMap []int32) { + _transpose_uint8_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint32avx2(src []uint8, dest []uint32, transposeMap []int32) { + _transpose_uint8_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int64avx2(src []uint8, dest []int64, transposeMap []int32) { + _transpose_uint8_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint64avx2(src []uint8, dest []uint64, transposeMap []int32) { + _transpose_uint8_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int8avx2(src []int16, dest []int8, transposeMap []int32) { + _transpose_int16_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint8avx2(src []int16, dest []uint8, transposeMap []int32) { + _transpose_int16_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int16avx2(src []int16, dest []int16, transposeMap []int32) { + _transpose_int16_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint16_avx2(src, dest unsafe.Pointer, length int, 
transposeMap unsafe.Pointer) + +func transposeInt16Uint16avx2(src []int16, dest []uint16, transposeMap []int32) { + _transpose_int16_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int32avx2(src []int16, dest []int32, transposeMap []int32) { + _transpose_int16_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint32avx2(src []int16, dest []uint32, transposeMap []int32) { + _transpose_int16_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int64avx2(src []int16, dest []int64, transposeMap []int32) { + _transpose_int16_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint64avx2(src []int16, dest []uint64, transposeMap []int32) { + _transpose_int16_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int8avx2(src []uint16, dest []int8, transposeMap []int32) { + _transpose_uint16_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint8avx2(src []uint16, dest []uint8, transposeMap []int32) { + _transpose_uint16_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int16avx2(src []uint16, dest []int16, transposeMap []int32) { + _transpose_uint16_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint16avx2(src []uint16, dest []uint16, transposeMap []int32) { + _transpose_uint16_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int32avx2(src []uint16, dest []int32, transposeMap []int32) { + _transpose_uint16_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint32avx2(src []uint16, dest []uint32, transposeMap []int32) { + _transpose_uint16_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), 
unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int64avx2(src []uint16, dest []int64, transposeMap []int32) { + _transpose_uint16_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint64avx2(src []uint16, dest []uint64, transposeMap []int32) { + _transpose_uint16_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int8avx2(src []int32, dest []int8, transposeMap []int32) { + _transpose_int32_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint8avx2(src []int32, dest []uint8, transposeMap []int32) { + _transpose_int32_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int16avx2(src []int32, dest []int16, transposeMap []int32) { + _transpose_int32_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint16avx2(src []int32, dest []uint16, transposeMap []int32) { + _transpose_int32_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int32avx2(src []int32, dest []int32, transposeMap []int32) { + _transpose_int32_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint32avx2(src []int32, dest []uint32, transposeMap []int32) { + _transpose_int32_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int64avx2(src []int32, dest []int64, transposeMap []int32) { + _transpose_int32_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint64avx2(src []int32, dest []uint64, transposeMap []int32) { + _transpose_int32_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int8avx2(src []uint32, dest []int8, transposeMap 
[]int32) { + _transpose_uint32_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint8avx2(src []uint32, dest []uint8, transposeMap []int32) { + _transpose_uint32_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int16avx2(src []uint32, dest []int16, transposeMap []int32) { + _transpose_uint32_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint16avx2(src []uint32, dest []uint16, transposeMap []int32) { + _transpose_uint32_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int32avx2(src []uint32, dest []int32, transposeMap []int32) { + _transpose_uint32_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint32avx2(src []uint32, dest []uint32, transposeMap []int32) { + _transpose_uint32_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int64avx2(src []uint32, dest []int64, transposeMap []int32) { + _transpose_uint32_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint64avx2(src []uint32, dest []uint64, transposeMap []int32) { + _transpose_uint32_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int8avx2(src []int64, dest []int8, transposeMap []int32) { + _transpose_int64_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint8avx2(src []int64, dest []uint8, transposeMap []int32) { + _transpose_int64_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int16avx2(src []int64, dest []int16, transposeMap []int32) { + _transpose_int64_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint16_avx2(src, dest unsafe.Pointer, 
length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint16avx2(src []int64, dest []uint16, transposeMap []int32) { + _transpose_int64_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int32avx2(src []int64, dest []int32, transposeMap []int32) { + _transpose_int64_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint32avx2(src []int64, dest []uint32, transposeMap []int32) { + _transpose_int64_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int64avx2(src []int64, dest []int64, transposeMap []int32) { + _transpose_int64_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint64avx2(src []int64, dest []uint64, transposeMap []int32) { + _transpose_int64_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int8avx2(src []uint64, dest []int8, transposeMap []int32) { + _transpose_uint64_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint8avx2(src []uint64, dest []uint8, transposeMap []int32) { + _transpose_uint64_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int16avx2(src []uint64, dest []int16, transposeMap []int32) { + _transpose_uint64_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint16avx2(src []uint64, dest []uint16, transposeMap []int32) { + _transpose_uint64_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int32avx2(src []uint64, dest []int32, transposeMap []int32) { + _transpose_uint64_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint32avx2(src []uint64, dest []uint32, transposeMap []int32) { + _transpose_uint64_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), 
unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int64avx2(src []uint64, dest []int64, transposeMap []int32) { + _transpose_uint64_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint64avx2(src []uint64, dest []uint64, transposeMap []int32) { + _transpose_uint64_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.s new file mode 100644 index 00000000..fbcc101e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.s @@ -0,0 +1,3074 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ·_transpose_uint8_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB0_1 + +LBB0_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB0_5 + +LBB0_1: + WORD $0xd285 // test edx, edx + JLE LBB0_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB0_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB0_3 + +LBB0_4: + RET + +TEXT ·_transpose_int8_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB1_1 + +LBB1_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 +
LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB1_5 + +LBB1_1: + WORD $0xd285 // test edx, edx + JLE LBB1_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB1_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB1_3 + +LBB1_4: + RET + +TEXT ·_transpose_uint16_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB2_1 + +LBB2_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB2_5 + +LBB2_1: + WORD $0xd285 // test edx, edx + JLE LBB2_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB2_3: + LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB2_3 + +LBB2_4: + RET + +TEXT ·_transpose_int16_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB3_1 + +LBB3_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB3_5 + +LBB3_1: + WORD $0xd285 // test edx, edx + JLE LBB3_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB3_3: + LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB3_3 + +LBB3_4: + RET + +TEXT ·_transpose_uint32_uint8_avx2(SB),
$0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB4_1 + +LBB4_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB4_5 + +LBB4_1: + WORD $0xd285 // test edx, edx + JLE LBB4_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB4_3: + LONG $0x87048b42 // mov eax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB4_3 + +LBB4_4: + RET + +TEXT ·_transpose_int32_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB5_1 + +LBB5_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB5_5 + +LBB5_1: + WORD $0xd285 // test edx, edx + JLE LBB5_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB5_3: + LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB5_3 + +LBB5_4: + RET + +TEXT ·_transpose_uint64_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB6_1 + +LBB6_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688;
BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB6_5 + +LBB6_1: + WORD $0xd285 // test edx, edx + JLE LBB6_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB6_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB6_3 + +LBB6_4: + RET + +TEXT ·_transpose_int64_uint8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB7_1 + +LBB7_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB7_5 + +LBB7_1: + WORD $0xd285 // test edx, edx + JLE LBB7_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB7_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB7_3 + +LBB7_4: + RET + +TEXT ·_transpose_uint8_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB8_1 + +LBB8_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB8_5 + +LBB8_1: + WORD $0xd285 // test edx, edx + JLE LBB8_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB8_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG
$0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB8_3 + +LBB8_4: + RET + +TEXT ·_transpose_int8_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB9_1 + +LBB9_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB9_5 + +LBB9_1: + WORD $0xd285 // test edx, edx + JLE LBB9_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB9_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB9_3 + +LBB9_4: + RET + +TEXT ·_transpose_uint16_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB10_1 + +LBB10_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB10_5 + +LBB10_1: + WORD $0xd285 // test edx, edx + JLE LBB10_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB10_3: + LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB10_3 + +LBB10_4: + RET + +TEXT ·_transpose_int16_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB11_1 + +LBB11_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG
$0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB11_5 + +LBB11_1: + WORD $0xd285 // test edx, edx + JLE LBB11_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB11_3: + LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB11_3 + +LBB11_4: + RET + +TEXT ยท_transpose_uint32_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB12_1 + +LBB12_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB12_5 + +LBB12_1: + WORD $0xd285 // test edx, edx + JLE LBB12_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB12_3: + LONG $0x87048b42 // mov eax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB12_3 + +LBB12_4: + RET + +TEXT ยท_transpose_int32_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB13_1 + +LBB13_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 
// add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB13_5 + +LBB13_1: + WORD $0xd285 // test edx, edx + JLE LBB13_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB13_3: + LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB13_3 + +LBB13_4: + RET + +TEXT ยท_transpose_uint64_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB14_1 + +LBB14_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB14_5 + +LBB14_1: + WORD $0xd285 // test edx, edx + JLE LBB14_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB14_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB14_3 + +LBB14_4: + RET + +TEXT ยท_transpose_int64_int8_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB15_1 + +LBB15_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB15_5 + +LBB15_1: + WORD $0xd285 // test edx, edx + JLE LBB15_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB15_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB15_3 + +LBB15_4: + RET + +TEXT ยท_transpose_uint8_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ 
length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB16_1 + +LBB16_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB16_5 + +LBB16_1: + WORD $0xd285 // test edx, edx + JLE LBB16_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB16_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB16_3 + +LBB16_4: + RET + +TEXT ยท_transpose_int8_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB17_1 + +LBB17_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB17_5 + +LBB17_1: + WORD $0xd285 // test edx, edx + JLE LBB17_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB17_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB17_3 + +LBB17_4: + RET + +TEXT ยท_transpose_uint16_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB18_1 + +LBB18_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 
// mov word [rsi + 4], dx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB18_5 + +LBB18_1: + WORD $0xd285 // test edx, edx + JLE LBB18_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB18_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB18_3 + +LBB18_4: + RET + +TEXT ยท_transpose_int16_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB19_1 + +LBB19_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB19_5 + +LBB19_1: + WORD $0xd285 // test edx, edx + JLE LBB19_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB19_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB19_3 + +LBB19_4: + RET + +TEXT ยท_transpose_uint32_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB20_1 + +LBB20_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB20_5 + +LBB20_1: + WORD $0xd285 // test edx, edx + JLE LBB20_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB20_3: + LONG $0x47048b42 // mov eax, dword [rdi + 2*r8] + LONG 
$0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB20_3 + +LBB20_4: + RET + +TEXT ยท_transpose_int32_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB21_1 + +LBB21_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB21_5 + +LBB21_1: + WORD $0xd285 // test edx, edx + JLE LBB21_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB21_3: + LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB21_3 + +LBB21_4: + RET + +TEXT ยท_transpose_uint64_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB22_1 + +LBB22_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB22_5 + +LBB22_1: + WORD $0xd285 // test edx, edx + JLE LBB22_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB22_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB22_3 + +LBB22_4: + RET + +TEXT ยท_transpose_int64_uint16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB23_1 + +LBB23_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + 
WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB23_5 + +LBB23_1: + WORD $0xd285 // test edx, edx + JLE LBB23_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB23_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB23_3 + +LBB23_4: + RET + +TEXT ยท_transpose_uint8_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB24_1 + +LBB24_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB24_5 + +LBB24_1: + WORD $0xd285 // test edx, edx + JLE LBB24_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB24_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB24_3 + +LBB24_4: + RET + +TEXT ยท_transpose_int8_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB25_1 + +LBB25_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 
// add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB25_5 + +LBB25_1: + WORD $0xd285 // test edx, edx + JLE LBB25_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB25_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB25_3 + +LBB25_4: + RET + +TEXT ยท_transpose_uint16_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB26_1 + +LBB26_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB26_5 + +LBB26_1: + WORD $0xd285 // test edx, edx + JLE LBB26_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB26_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB26_3 + +LBB26_4: + RET + +TEXT ยท_transpose_int16_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB27_1 + +LBB27_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB27_5 + +LBB27_1: + WORD $0xd285 // test edx, edx + JLE LBB27_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB27_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB27_3 + +LBB27_4: + RET + +TEXT 
ยท_transpose_uint32_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB28_1 + +LBB28_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB28_5 + +LBB28_1: + WORD $0xd285 // test edx, edx + JLE LBB28_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB28_3: + LONG $0x47048b42 // mov eax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB28_3 + +LBB28_4: + RET + +TEXT ยท_transpose_int32_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB29_1 + +LBB29_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB29_5 + +LBB29_1: + WORD $0xd285 // test edx, edx + JLE LBB29_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB29_3: + LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB29_3 + +LBB29_4: + RET + +TEXT ยท_transpose_uint64_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB30_1 + +LBB30_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG 
$0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB30_5 + +LBB30_1: + WORD $0xd285 // test edx, edx + JLE LBB30_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB30_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB30_3 + +LBB30_4: + RET + +TEXT ยท_transpose_int64_int16_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB31_1 + +LBB31_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB31_5 + +LBB31_1: + WORD $0xd285 // test edx, edx + JLE LBB31_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB31_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB31_3 + +LBB31_4: + RET + +TEXT ยท_transpose_uint8_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB32_1 + +LBB32_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB32_5 + +LBB32_1: + WORD $0xd285 // test edx, edx + JLE LBB32_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB32_3: + LONG 
$0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB32_3 + +LBB32_4: + RET + +TEXT ยท_transpose_int8_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB33_1 + +LBB33_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB33_5 + +LBB33_1: + WORD $0xd285 // test edx, edx + JLE LBB33_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB33_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB33_3 + +LBB33_4: + RET + +TEXT ยท_transpose_uint16_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB34_1 + +LBB34_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB34_5 + +LBB34_1: + WORD $0xd285 // test edx, edx + JLE LBB34_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB34_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB34_3 + +LBB34_4: + RET + +TEXT ยท_transpose_int16_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD 
$0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB35_1 + +LBB35_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB35_5 + +LBB35_1: + WORD $0xd285 // test edx, edx + JLE LBB35_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB35_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB35_3 + +LBB35_4: + RET + +TEXT ยท_transpose_uint32_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB36_1 + +LBB36_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB36_5 + +LBB36_1: + WORD $0xd285 // test edx, edx + JLE LBB36_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB36_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB36_3 + +LBB36_4: + RET + +TEXT ยท_transpose_int32_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB37_1 + +LBB37_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + WORD $0x148b; BYTE 
$0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB37_5 + +LBB37_1: + WORD $0xd285 // test edx, edx + JLE LBB37_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB37_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB37_3 + +LBB37_4: + RET + +TEXT ยท_transpose_uint64_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB38_1 + +LBB38_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB38_5 + +LBB38_1: + WORD $0xd285 // test edx, edx + JLE LBB38_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB38_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB38_3 + +LBB38_4: + RET + +TEXT ยท_transpose_int64_uint32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB39_1 + +LBB39_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB39_5 + +LBB39_1: + WORD $0xd285 // test edx, edx + JLE LBB39_4 + WORD $0xc283; BYTE $0x01 // add edx, 
1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB39_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB39_3 + +LBB39_4: + RET + +TEXT ยท_transpose_uint8_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB40_1 + +LBB40_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB40_5 + +LBB40_1: + WORD $0xd285 // test edx, edx + JLE LBB40_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB40_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB40_3 + +LBB40_4: + RET + +TEXT ยท_transpose_int8_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB41_1 + +LBB41_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB41_5 + +LBB41_1: + WORD $0xd285 // test edx, edx + JLE LBB41_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB41_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB41_3 + +LBB41_4: + RET + +TEXT ยท_transpose_uint16_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ 
length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB42_1 + +LBB42_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB42_5 + +LBB42_1: + WORD $0xd285 // test edx, edx + JLE LBB42_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB42_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB42_3 + +LBB42_4: + RET + +TEXT ยท_transpose_int16_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB43_1 + +LBB43_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB43_5 + +LBB43_1: + WORD $0xd285 // test edx, edx + JLE LBB43_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB43_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB43_3 + +LBB43_4: + RET + +TEXT ยท_transpose_uint32_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB44_1 + +LBB44_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + WORD $0x578b; 
BYTE $0x08 // mov edx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB44_5 + +LBB44_1: + WORD $0xd285 // test edx, edx + JLE LBB44_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB44_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB44_3 + +LBB44_4: + RET + +TEXT ยท_transpose_int32_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB45_1 + +LBB45_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB45_5 + +LBB45_1: + WORD $0xd285 // test edx, edx + JLE LBB45_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB45_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB45_3 + +LBB45_4: + RET + +TEXT ยท_transpose_uint64_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB46_1 + +LBB46_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB46_5 + +LBB46_1: + WORD 
$0xd285 // test edx, edx + JLE LBB46_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB46_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB46_3 + +LBB46_4: + RET + +TEXT ·_transpose_int64_int32_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB47_1 + +LBB47_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB47_5 + +LBB47_1: + WORD $0xd285 // test edx, edx + JLE LBB47_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB47_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB47_3 + +LBB47_4: + RET + +TEXT ·_transpose_uint8_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB48_1 + +LBB48_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB48_5 + +LBB48_1: + WORD $0xd285 // test edx, edx + JLE LBB48_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB48_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB48_3 + +LBB48_4: + RET + +TEXT ·_transpose_int8_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ
length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB49_1 + +LBB49_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB49_5 + +LBB49_1: + WORD $0xd285 // test edx, edx + JLE LBB49_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB49_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB49_3 + +LBB49_4: + RET + +TEXT ·_transpose_uint16_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB50_1 + +LBB50_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB50_5 + +LBB50_1: + WORD $0xd285 // test edx, edx + JLE LBB50_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB50_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB50_3 + +LBB50_4: + RET + +TEXT ·_transpose_int16_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB51_1 + +LBB51_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x91146348 //
movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB51_5 + +LBB51_1: + WORD $0xd285 // test edx, edx + JLE LBB51_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB51_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB51_3 + +LBB51_4: + RET + +TEXT ·_transpose_uint32_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB52_1 + +LBB52_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB52_5 + +LBB52_1: + WORD $0xd285 // test edx, edx + JLE LBB52_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB52_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB52_3 + +LBB52_4: + RET + +TEXT ·_transpose_int32_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB53_1 + +LBB53_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB53_5 + +LBB53_1: + WORD $0xd285 // test edx, edx + JLE LBB53_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d +
+LBB53_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB53_3 + +LBB53_4: + RET + +TEXT ·_transpose_uint64_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB54_1 + +LBB54_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB54_5 + +LBB54_1: + WORD $0xd285 // test edx, edx + JLE LBB54_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB54_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB54_3 + +LBB54_4: + RET + +TEXT ·_transpose_int64_uint64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB55_1 + +LBB55_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB55_5 + +LBB55_1: + WORD $0xd285 // test edx, edx + JLE LBB55_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB55_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB55_3 + +LBB55_4: + RET + +TEXT ·_transpose_uint8_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB56_1 + +LBB56_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx,
byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB56_5 + +LBB56_1: + WORD $0xd285 // test edx, edx + JLE LBB56_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB56_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB56_3 + +LBB56_4: + RET + +TEXT ·_transpose_int8_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB57_1 + +LBB57_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB57_5 + +LBB57_1: + WORD $0xd285 // test edx, edx + JLE LBB57_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB57_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB57_3 + +LBB57_4: + RET + +TEXT ·_transpose_uint16_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB58_1 + +LBB58_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov
qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB58_5 + +LBB58_1: + WORD $0xd285 // test edx, edx + JLE LBB58_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB58_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB58_3 + +LBB58_4: + RET + +TEXT ·_transpose_int16_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB59_1 + +LBB59_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB59_5 + +LBB59_1: + WORD $0xd285 // test edx, edx + JLE LBB59_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB59_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB59_3 + +LBB59_4: + RET + +TEXT ·_transpose_uint32_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB60_1 + +LBB60_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB60_5 + +LBB60_1: + WORD $0xd285 // test edx, edx + JLE LBB60_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB60_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 +
WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB60_3 + +LBB60_4: + RET + +TEXT ·_transpose_int32_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB61_1 + +LBB61_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB61_5 + +LBB61_1: + WORD $0xd285 // test edx, edx + JLE LBB61_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB61_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB61_3 + +LBB61_4: + RET + +TEXT ·_transpose_uint64_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB62_1 + +LBB62_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB62_5 + +LBB62_1: + WORD $0xd285 // test edx, edx + JLE LBB62_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB62_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB62_3 + +LBB62_4: + RET + +TEXT ·_transpose_int64_int64_avx2(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB63_1 + +LBB63_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 //
movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB63_5 + +LBB63_1: + WORD $0xd285 // test edx, edx + JLE LBB63_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB63_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB63_3 + +LBB63_4: + RET diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go new file mode 100644 index 00000000..c9e1c84d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go @@ -0,0 +1,227 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
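Each `_transpose_<src>_<dest>_<arch>` kernel above computes the same table gather, dest[i] = transposeMap[src[i]], as a four-way unrolled main loop plus a scalar tail; only the load width and the sign/zero extension differ between type pairs. The vendored transpose_ints_def.go that follows adds the type-switch front end, TransposeInts. A minimal pure-Go sketch of the same loop (illustrative names, not part of the vendored package):

package main

import "fmt"

// transposeRef mirrors what one generated kernel does for a single
// type pair: a straight gather through the mapping table (the
// assembly merely unrolls this loop four elements at a time).
func transposeRef(src []int64, dest []int32, transposeMap []int32) {
	for i := range dest {
		dest[i] = transposeMap[src[i]]
	}
}

func main() {
	// e.g. remapping dictionary indices through a merge mapping
	src := []int64{0, 2, 1, 2}
	dest := make([]int32, len(src))
	transposeRef(src, dest, []int32{10, 20, 30})
	fmt.Println(dest) // [10 30 20 30]
}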
+ +package utils + +import ( + "errors" + + "github.com/apache/arrow/go/v12/arrow" +) + +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=avx2 transpose_ints_simd.go.tmpl=transpose_ints_avx2_amd64.go +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=sse4 transpose_ints_simd.go.tmpl=transpose_ints_sse4_amd64.go +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_s390x.go.tmpl=transpose_ints_s390x.go +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_s390x.go.tmpl=transpose_ints_arm64.go +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_noasm.go.tmpl=transpose_ints_noasm.go +//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints.go.tmpl=transpose_ints.go + +func bufToTyped(typ arrow.DataType, buf []byte, offset, length int) (interface{}, error) { + switch typ.ID() { + case arrow.INT8: + return arrow.Int8Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.INT16: + return arrow.Int16Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.INT32: + return arrow.Int32Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.INT64: + return arrow.Int64Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.UINT8: + return arrow.Uint8Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.UINT16: + return arrow.Uint16Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.UINT32: + return arrow.Uint32Traits.CastFromBytes(buf)[offset : offset+length], nil + case arrow.UINT64: + return arrow.Uint64Traits.CastFromBytes(buf)[offset : offset+length], nil + } + return nil, errors.New("only accepts integral types") +} + +// TransposeIntsBuffers takes the data-types, byte buffers, and offsets of a source and destination +// buffer to perform TransposeInts on with the provided mapping data. +func TransposeIntsBuffers(inType, outType arrow.DataType, indata, outdata []byte, inOffset, outOffset int, length int, transposeMap []int32) error { + src, err := bufToTyped(inType, indata, inOffset, length) + if err != nil { + return err + } + dest, err := bufToTyped(outType, outdata, outOffset, length) + if err != nil { + return err + } + + return TransposeInts(src, dest, transposeMap) +} + +// TransposeInts expects two integral slices and the values they map to. Returning +// an error if either src or dest are not an integral type. 
+func TransposeInts(src, dest interface{}, mapping []int32) error { + switch s := src.(type) { + case []int8: + switch d := dest.(type) { + case []int8: + TransposeInt8Int8(s, d, mapping) + case []int16: + TransposeInt8Int16(s, d, mapping) + case []int32: + TransposeInt8Int32(s, d, mapping) + case []int64: + TransposeInt8Int64(s, d, mapping) + case []uint8: + TransposeInt8Uint8(s, d, mapping) + case []uint16: + TransposeInt8Uint16(s, d, mapping) + case []uint32: + TransposeInt8Uint32(s, d, mapping) + case []uint64: + TransposeInt8Uint64(s, d, mapping) + } + case []int16: + switch d := dest.(type) { + case []int8: + TransposeInt16Int8(s, d, mapping) + case []int16: + TransposeInt16Int16(s, d, mapping) + case []int32: + TransposeInt16Int32(s, d, mapping) + case []int64: + TransposeInt16Int64(s, d, mapping) + case []uint8: + TransposeInt16Uint8(s, d, mapping) + case []uint16: + TransposeInt16Uint16(s, d, mapping) + case []uint32: + TransposeInt16Uint32(s, d, mapping) + case []uint64: + TransposeInt16Uint64(s, d, mapping) + } + case []int32: + switch d := dest.(type) { + case []int8: + TransposeInt32Int8(s, d, mapping) + case []int16: + TransposeInt32Int16(s, d, mapping) + case []int32: + TransposeInt32Int32(s, d, mapping) + case []int64: + TransposeInt32Int64(s, d, mapping) + case []uint8: + TransposeInt32Uint8(s, d, mapping) + case []uint16: + TransposeInt32Uint16(s, d, mapping) + case []uint32: + TransposeInt32Uint32(s, d, mapping) + case []uint64: + TransposeInt32Uint64(s, d, mapping) + } + case []int64: + switch d := dest.(type) { + case []int8: + TransposeInt64Int8(s, d, mapping) + case []int16: + TransposeInt64Int16(s, d, mapping) + case []int32: + TransposeInt64Int32(s, d, mapping) + case []int64: + TransposeInt64Int64(s, d, mapping) + case []uint8: + TransposeInt64Uint8(s, d, mapping) + case []uint16: + TransposeInt64Uint16(s, d, mapping) + case []uint32: + TransposeInt64Uint32(s, d, mapping) + case []uint64: + TransposeInt64Uint64(s, d, mapping) + } + case []uint8: + switch d := dest.(type) { + case []int8: + TransposeUint8Int8(s, d, mapping) + case []int16: + TransposeUint8Int16(s, d, mapping) + case []int32: + TransposeUint8Int32(s, d, mapping) + case []int64: + TransposeUint8Int64(s, d, mapping) + case []uint8: + TransposeUint8Uint8(s, d, mapping) + case []uint16: + TransposeUint8Uint16(s, d, mapping) + case []uint32: + TransposeUint8Uint32(s, d, mapping) + case []uint64: + TransposeUint8Uint64(s, d, mapping) + } + case []uint16: + switch d := dest.(type) { + case []int8: + TransposeUint16Int8(s, d, mapping) + case []int16: + TransposeUint16Int16(s, d, mapping) + case []int32: + TransposeUint16Int32(s, d, mapping) + case []int64: + TransposeUint16Int64(s, d, mapping) + case []uint8: + TransposeUint16Uint8(s, d, mapping) + case []uint16: + TransposeUint16Uint16(s, d, mapping) + case []uint32: + TransposeUint16Uint32(s, d, mapping) + case []uint64: + TransposeUint16Uint64(s, d, mapping) + } + case []uint32: + switch d := dest.(type) { + case []int8: + TransposeUint32Int8(s, d, mapping) + case []int16: + TransposeUint32Int16(s, d, mapping) + case []int32: + TransposeUint32Int32(s, d, mapping) + case []int64: + TransposeUint32Int64(s, d, mapping) + case []uint8: + TransposeUint32Uint8(s, d, mapping) + case []uint16: + TransposeUint32Uint16(s, d, mapping) + case []uint32: + TransposeUint32Uint32(s, d, mapping) + case []uint64: + TransposeUint32Uint64(s, d, mapping) + } + case []uint64: + switch d := dest.(type) { + case []int8: + TransposeUint64Int8(s, d, mapping) + case []int16: + 
TransposeUint64Int16(s, d, mapping) + case []int32: + TransposeUint64Int32(s, d, mapping) + case []int64: + TransposeUint64Int64(s, d, mapping) + case []uint8: + TransposeUint64Uint8(s, d, mapping) + case []uint16: + TransposeUint64Uint16(s, d, mapping) + case []uint32: + TransposeUint64Uint32(s, d, mapping) + case []uint64: + TransposeUint64Uint64(s, d, mapping) + } + } + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go new file mode 100644 index 00000000..461aaf31 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go @@ -0,0 +1,96 @@ +// Code generated by transpose_ints_noasm.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build noasm || (!amd64 && !arm64 && !s390x && !ppc64le) + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( + TransposeInt8Int8 = transposeInt8Int8 + TransposeInt8Uint8 = transposeInt8Uint8 + TransposeInt8Int16 = transposeInt8Int16 + TransposeInt8Uint16 = transposeInt8Uint16 + TransposeInt8Int32 = transposeInt8Int32 + TransposeInt8Uint32 = transposeInt8Uint32 + TransposeInt8Int64 = transposeInt8Int64 + TransposeInt8Uint64 = transposeInt8Uint64 + + TransposeUint8Int8 = transposeUint8Int8 + TransposeUint8Uint8 = transposeUint8Uint8 + TransposeUint8Int16 = transposeUint8Int16 + TransposeUint8Uint16 = transposeUint8Uint16 + TransposeUint8Int32 = transposeUint8Int32 + TransposeUint8Uint32 = transposeUint8Uint32 + TransposeUint8Int64 = transposeUint8Int64 + TransposeUint8Uint64 = transposeUint8Uint64 + + TransposeInt16Int8 = transposeInt16Int8 + TransposeInt16Uint8 = transposeInt16Uint8 + TransposeInt16Int16 = transposeInt16Int16 + TransposeInt16Uint16 = transposeInt16Uint16 + TransposeInt16Int32 = transposeInt16Int32 + TransposeInt16Uint32 = transposeInt16Uint32 + TransposeInt16Int64 = transposeInt16Int64 + TransposeInt16Uint64 = transposeInt16Uint64 + + TransposeUint16Int8 = transposeUint16Int8 + TransposeUint16Uint8 = transposeUint16Uint8 + TransposeUint16Int16 = transposeUint16Int16 + TransposeUint16Uint16 = transposeUint16Uint16 + TransposeUint16Int32 = transposeUint16Int32 + TransposeUint16Uint32 = transposeUint16Uint32 + TransposeUint16Int64 = transposeUint16Int64 + TransposeUint16Uint64 = transposeUint16Uint64 + + TransposeInt32Int8 = transposeInt32Int8 + TransposeInt32Uint8 = transposeInt32Uint8 + TransposeInt32Int16 = transposeInt32Int16 + TransposeInt32Uint16 = transposeInt32Uint16 + TransposeInt32Int32 = transposeInt32Int32 + TransposeInt32Uint32 = transposeInt32Uint32 + TransposeInt32Int64 = transposeInt32Int64 + TransposeInt32Uint64 = transposeInt32Uint64 + + 
TransposeUint32Int8 = transposeUint32Int8 + TransposeUint32Uint8 = transposeUint32Uint8 + TransposeUint32Int16 = transposeUint32Int16 + TransposeUint32Uint16 = transposeUint32Uint16 + TransposeUint32Int32 = transposeUint32Int32 + TransposeUint32Uint32 = transposeUint32Uint32 + TransposeUint32Int64 = transposeUint32Int64 + TransposeUint32Uint64 = transposeUint32Uint64 + + TransposeInt64Int8 = transposeInt64Int8 + TransposeInt64Uint8 = transposeInt64Uint8 + TransposeInt64Int16 = transposeInt64Int16 + TransposeInt64Uint16 = transposeInt64Uint16 + TransposeInt64Int32 = transposeInt64Int32 + TransposeInt64Uint32 = transposeInt64Uint32 + TransposeInt64Int64 = transposeInt64Int64 + TransposeInt64Uint64 = transposeInt64Uint64 + + TransposeUint64Int8 = transposeUint64Int8 + TransposeUint64Uint8 = transposeUint64Uint8 + TransposeUint64Int16 = transposeUint64Int16 + TransposeUint64Uint16 = transposeUint64Uint16 + TransposeUint64Int32 = transposeUint64Int32 + TransposeUint64Uint32 = transposeUint64Uint32 + TransposeUint64Int64 = transposeUint64Int64 + TransposeUint64Uint64 = transposeUint64Uint64 +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go.tmpl new file mode 100644 index 00000000..faffdce3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go.tmpl @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build noasm +// +build noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} = transpose{{$srcName}}{{$destName}} +{{end}} +{{end}} +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_ppc64le.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_ppc64le.go new file mode 100644 index 00000000..cc957cda --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_ppc64le.go @@ -0,0 +1,96 @@ +// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( + TransposeInt8Int8 = transposeInt8Int8 + TransposeInt8Uint8 = transposeInt8Uint8 + TransposeInt8Int16 = transposeInt8Int16 + TransposeInt8Uint16 = transposeInt8Uint16 + TransposeInt8Int32 = transposeInt8Int32 + TransposeInt8Uint32 = transposeInt8Uint32 + TransposeInt8Int64 = transposeInt8Int64 + TransposeInt8Uint64 = transposeInt8Uint64 + + TransposeUint8Int8 = transposeUint8Int8 + TransposeUint8Uint8 = transposeUint8Uint8 + TransposeUint8Int16 = transposeUint8Int16 + TransposeUint8Uint16 = transposeUint8Uint16 + TransposeUint8Int32 = transposeUint8Int32 + TransposeUint8Uint32 = transposeUint8Uint32 + TransposeUint8Int64 = transposeUint8Int64 + TransposeUint8Uint64 = transposeUint8Uint64 + + TransposeInt16Int8 = transposeInt16Int8 + TransposeInt16Uint8 = transposeInt16Uint8 + TransposeInt16Int16 = transposeInt16Int16 + TransposeInt16Uint16 = transposeInt16Uint16 + TransposeInt16Int32 = transposeInt16Int32 + TransposeInt16Uint32 = transposeInt16Uint32 + TransposeInt16Int64 = transposeInt16Int64 + TransposeInt16Uint64 = transposeInt16Uint64 + + TransposeUint16Int8 = transposeUint16Int8 + TransposeUint16Uint8 = transposeUint16Uint8 + TransposeUint16Int16 = transposeUint16Int16 + TransposeUint16Uint16 = transposeUint16Uint16 + TransposeUint16Int32 = transposeUint16Int32 + TransposeUint16Uint32 = transposeUint16Uint32 + TransposeUint16Int64 = transposeUint16Int64 + TransposeUint16Uint64 = transposeUint16Uint64 + + TransposeInt32Int8 = transposeInt32Int8 + TransposeInt32Uint8 = transposeInt32Uint8 + TransposeInt32Int16 = transposeInt32Int16 + TransposeInt32Uint16 = transposeInt32Uint16 + TransposeInt32Int32 = transposeInt32Int32 + TransposeInt32Uint32 = transposeInt32Uint32 + TransposeInt32Int64 = transposeInt32Int64 + TransposeInt32Uint64 = transposeInt32Uint64 + + TransposeUint32Int8 = transposeUint32Int8 + TransposeUint32Uint8 = transposeUint32Uint8 + TransposeUint32Int16 = transposeUint32Int16 + TransposeUint32Uint16 = transposeUint32Uint16 + TransposeUint32Int32 = transposeUint32Int32 + TransposeUint32Uint32 = transposeUint32Uint32 + TransposeUint32Int64 = transposeUint32Int64 + TransposeUint32Uint64 = transposeUint32Uint64 + + TransposeInt64Int8 = transposeInt64Int8 + TransposeInt64Uint8 = transposeInt64Uint8 + TransposeInt64Int16 = transposeInt64Int16 + TransposeInt64Uint16 = transposeInt64Uint16 + TransposeInt64Int32 = transposeInt64Int32 + TransposeInt64Uint32 = transposeInt64Uint32 + TransposeInt64Int64 = transposeInt64Int64 + TransposeInt64Uint64 = transposeInt64Uint64 + + TransposeUint64Int8 = transposeUint64Int8 + TransposeUint64Uint8 = transposeUint64Uint8 + TransposeUint64Int16 = transposeUint64Int16 + TransposeUint64Uint16 = transposeUint64Uint16 + TransposeUint64Int32 = transposeUint64Int32 + TransposeUint64Uint32 = transposeUint64Uint32 + TransposeUint64Int64 = transposeUint64Int64 + TransposeUint64Uint64 = transposeUint64Uint64 +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go 
b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go new file mode 100644 index 00000000..cc957cda --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go @@ -0,0 +1,96 @@ +// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( + TransposeInt8Int8 = transposeInt8Int8 + TransposeInt8Uint8 = transposeInt8Uint8 + TransposeInt8Int16 = transposeInt8Int16 + TransposeInt8Uint16 = transposeInt8Uint16 + TransposeInt8Int32 = transposeInt8Int32 + TransposeInt8Uint32 = transposeInt8Uint32 + TransposeInt8Int64 = transposeInt8Int64 + TransposeInt8Uint64 = transposeInt8Uint64 + + TransposeUint8Int8 = transposeUint8Int8 + TransposeUint8Uint8 = transposeUint8Uint8 + TransposeUint8Int16 = transposeUint8Int16 + TransposeUint8Uint16 = transposeUint8Uint16 + TransposeUint8Int32 = transposeUint8Int32 + TransposeUint8Uint32 = transposeUint8Uint32 + TransposeUint8Int64 = transposeUint8Int64 + TransposeUint8Uint64 = transposeUint8Uint64 + + TransposeInt16Int8 = transposeInt16Int8 + TransposeInt16Uint8 = transposeInt16Uint8 + TransposeInt16Int16 = transposeInt16Int16 + TransposeInt16Uint16 = transposeInt16Uint16 + TransposeInt16Int32 = transposeInt16Int32 + TransposeInt16Uint32 = transposeInt16Uint32 + TransposeInt16Int64 = transposeInt16Int64 + TransposeInt16Uint64 = transposeInt16Uint64 + + TransposeUint16Int8 = transposeUint16Int8 + TransposeUint16Uint8 = transposeUint16Uint8 + TransposeUint16Int16 = transposeUint16Int16 + TransposeUint16Uint16 = transposeUint16Uint16 + TransposeUint16Int32 = transposeUint16Int32 + TransposeUint16Uint32 = transposeUint16Uint32 + TransposeUint16Int64 = transposeUint16Int64 + TransposeUint16Uint64 = transposeUint16Uint64 + + TransposeInt32Int8 = transposeInt32Int8 + TransposeInt32Uint8 = transposeInt32Uint8 + TransposeInt32Int16 = transposeInt32Int16 + TransposeInt32Uint16 = transposeInt32Uint16 + TransposeInt32Int32 = transposeInt32Int32 + TransposeInt32Uint32 = transposeInt32Uint32 + TransposeInt32Int64 = transposeInt32Int64 + TransposeInt32Uint64 = transposeInt32Uint64 + + TransposeUint32Int8 = transposeUint32Int8 + TransposeUint32Uint8 = transposeUint32Uint8 + TransposeUint32Int16 = transposeUint32Int16 + TransposeUint32Uint16 = transposeUint32Uint16 + TransposeUint32Int32 = transposeUint32Int32 + TransposeUint32Uint32 = transposeUint32Uint32 + TransposeUint32Int64 = transposeUint32Int64 + TransposeUint32Uint64 = transposeUint32Uint64 + + TransposeInt64Int8 = transposeInt64Int8 + TransposeInt64Uint8 = transposeInt64Uint8 + TransposeInt64Int16 = transposeInt64Int16 + 
TransposeInt64Uint16 = transposeInt64Uint16 + TransposeInt64Int32 = transposeInt64Int32 + TransposeInt64Uint32 = transposeInt64Uint32 + TransposeInt64Int64 = transposeInt64Int64 + TransposeInt64Uint64 = transposeInt64Uint64 + + TransposeUint64Int8 = transposeUint64Int8 + TransposeUint64Uint8 = transposeUint64Uint8 + TransposeUint64Int16 = transposeUint64Int16 + TransposeUint64Uint16 = transposeUint64Uint16 + TransposeUint64Int32 = transposeUint64Int32 + TransposeUint64Uint32 = transposeUint64Uint32 + TransposeUint64Int64 = transposeUint64Int64 + TransposeUint64Uint64 = transposeUint64Uint64 +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go.tmpl new file mode 100644 index 00000000..d93c8779 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go.tmpl @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !noasm +// +build !noasm + +package utils + +// if building with the 'noasm' tag, then point to the pure go implementations +var ( +{{ $typelist := .In }} +{{range .In}} +{{ $src := .Type -}} +{{ $srcName := .Name -}} +{{ range $typelist -}} +{{ $dest := .Type -}} +{{ $destName := .Name -}} + Transpose{{$srcName}}{{$destName}} = transpose{{$srcName}}{{$destName}} +{{end}} +{{end}} +) diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_simd.go.tmpl b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_simd.go.tmpl new file mode 100644 index 00000000..034d0e9d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_simd.go.tmpl @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
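The SIMD template below iterates over every source/destination integer type pair and, for the configured arch, emits a //go:noescape declaration for the assembly symbol plus a thin typed wrapper. As a sketch, the int8/int32 pair at arch=avx2 should expand to roughly the following (compare the generated sse4 file later in this diff):

//go:noescape
func _transpose_int8_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)

func transposeInt8Int32avx2(src []int8, dest []int32, transposeMap []int32) {
	// pass raw base pointers; the kernel writes len(dest) elements
	_transpose_int8_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
}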
+ +//go:build !noasm +// +build !noasm + +package utils + +import ( + "unsafe" +) + +{{ $arch := .D.arch}} +{{ $typelist := .In}} +{{range .In}} +{{ $src := .Type }} +{{ $srcName := .Name }} +{{ range $typelist}} +{{ $dest := .Type }} +{{ $destName := .Name }} + +//go:noescape +func _transpose_{{printf "%s_%s_%s" $src $dest $arch}}(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transpose{{ $srcName }}{{ $destName }}{{ $arch }}(src []{{$src}}, dest []{{$dest}}, transposeMap []int32) { + _transpose_{{printf "%s_%s_%s" $src $dest $arch}}(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} +{{ end }} +{{ end }} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.go new file mode 100644 index 00000000..241ca74a --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.go @@ -0,0 +1,473 @@ +// Code generated by transpose_ints_simd.go.tmpl. DO NOT EDIT. + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
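One caveat worth keeping in mind while reading the generated wrappers below (and their avx2 twins earlier): they take the address of src[0], dest[0], and transposeMap[0] unconditionally and use len(dest) as the element count, so a call with an empty slice would panic. A defensive caller might look like this hypothetical sketch; the guard is not part of the vendored code:

// safeTranspose guards one generated wrapper against empty or
// undersized inputs. Hypothetical helper for illustration only.
func safeTranspose(src []int8, dest []int32, transposeMap []int32) {
	if len(dest) == 0 || len(src) < len(dest) || len(transposeMap) == 0 {
		return // nothing to gather, or inputs too short for the kernel
	}
	transposeInt8Int32sse4(src, dest, transposeMap)
}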
+ +//go:build !noasm + +package utils + +import ( + "unsafe" +) + +//go:noescape +func _transpose_int8_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int8sse4(src []int8, dest []int8, transposeMap []int32) { + _transpose_int8_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint8sse4(src []int8, dest []uint8, transposeMap []int32) { + _transpose_int8_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int16sse4(src []int8, dest []int16, transposeMap []int32) { + _transpose_int8_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint16sse4(src []int8, dest []uint16, transposeMap []int32) { + _transpose_int8_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int32sse4(src []int8, dest []int32, transposeMap []int32) { + _transpose_int8_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint32sse4(src []int8, dest []uint32, transposeMap []int32) { + _transpose_int8_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Int64sse4(src []int8, dest []int64, transposeMap []int32) { + _transpose_int8_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int8_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt8Uint64sse4(src []int8, dest []uint64, transposeMap []int32) { + _transpose_int8_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int8sse4(src []uint8, dest []int8, transposeMap []int32) { + _transpose_uint8_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint8sse4(src []uint8, dest []uint8, transposeMap []int32) { + _transpose_uint8_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int16sse4(src []uint8, dest []int16, transposeMap []int32) { + 
_transpose_uint8_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint16sse4(src []uint8, dest []uint16, transposeMap []int32) { + _transpose_uint8_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int32sse4(src []uint8, dest []int32, transposeMap []int32) { + _transpose_uint8_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint32sse4(src []uint8, dest []uint32, transposeMap []int32) { + _transpose_uint8_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Int64sse4(src []uint8, dest []int64, transposeMap []int32) { + _transpose_uint8_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint8_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint8Uint64sse4(src []uint8, dest []uint64, transposeMap []int32) { + _transpose_uint8_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int8sse4(src []int16, dest []int8, transposeMap []int32) { + _transpose_int16_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint8sse4(src []int16, dest []uint8, transposeMap []int32) { + _transpose_int16_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int16sse4(src []int16, dest []int16, transposeMap []int32) { + _transpose_int16_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint16sse4(src []int16, dest []uint16, transposeMap []int32) { + _transpose_int16_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int32sse4(src []int16, dest []int32, transposeMap []int32) { + _transpose_int16_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap 
unsafe.Pointer) + +func transposeInt16Uint32sse4(src []int16, dest []uint32, transposeMap []int32) { + _transpose_int16_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Int64sse4(src []int16, dest []int64, transposeMap []int32) { + _transpose_int16_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int16_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt16Uint64sse4(src []int16, dest []uint64, transposeMap []int32) { + _transpose_int16_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int8sse4(src []uint16, dest []int8, transposeMap []int32) { + _transpose_uint16_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint8sse4(src []uint16, dest []uint8, transposeMap []int32) { + _transpose_uint16_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int16sse4(src []uint16, dest []int16, transposeMap []int32) { + _transpose_uint16_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint16sse4(src []uint16, dest []uint16, transposeMap []int32) { + _transpose_uint16_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int32sse4(src []uint16, dest []int32, transposeMap []int32) { + _transpose_uint16_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint32sse4(src []uint16, dest []uint32, transposeMap []int32) { + _transpose_uint16_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Int64sse4(src []uint16, dest []int64, transposeMap []int32) { + _transpose_uint16_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint16_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint16Uint64sse4(src []uint16, dest []uint64, transposeMap []int32) { + _transpose_uint16_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), 
unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int8sse4(src []int32, dest []int8, transposeMap []int32) { + _transpose_int32_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint8sse4(src []int32, dest []uint8, transposeMap []int32) { + _transpose_int32_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int16sse4(src []int32, dest []int16, transposeMap []int32) { + _transpose_int32_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint16sse4(src []int32, dest []uint16, transposeMap []int32) { + _transpose_int32_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int32sse4(src []int32, dest []int32, transposeMap []int32) { + _transpose_int32_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint32sse4(src []int32, dest []uint32, transposeMap []int32) { + _transpose_int32_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Int64sse4(src []int32, dest []int64, transposeMap []int32) { + _transpose_int32_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int32_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt32Uint64sse4(src []int32, dest []uint64, transposeMap []int32) { + _transpose_int32_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int8sse4(src []uint32, dest []int8, transposeMap []int32) { + _transpose_uint32_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint8sse4(src []uint32, dest []uint8, transposeMap []int32) { + _transpose_uint32_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int16sse4(src []uint32, dest []int16, transposeMap []int32) { + 
_transpose_uint32_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint16sse4(src []uint32, dest []uint16, transposeMap []int32) { + _transpose_uint32_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int32sse4(src []uint32, dest []int32, transposeMap []int32) { + _transpose_uint32_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint32sse4(src []uint32, dest []uint32, transposeMap []int32) { + _transpose_uint32_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Int64sse4(src []uint32, dest []int64, transposeMap []int32) { + _transpose_uint32_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint32_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint32Uint64sse4(src []uint32, dest []uint64, transposeMap []int32) { + _transpose_uint32_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int8sse4(src []int64, dest []int8, transposeMap []int32) { + _transpose_int64_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint8sse4(src []int64, dest []uint8, transposeMap []int32) { + _transpose_int64_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int16sse4(src []int64, dest []int16, transposeMap []int32) { + _transpose_int64_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint16sse4(src []int64, dest []uint16, transposeMap []int32) { + _transpose_int64_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int32sse4(src []int64, dest []int32, transposeMap []int32) { + _transpose_int64_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint32_sse4(src, dest unsafe.Pointer, length int, 
transposeMap unsafe.Pointer) + +func transposeInt64Uint32sse4(src []int64, dest []uint32, transposeMap []int32) { + _transpose_int64_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Int64sse4(src []int64, dest []int64, transposeMap []int32) { + _transpose_int64_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_int64_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeInt64Uint64sse4(src []int64, dest []uint64, transposeMap []int32) { + _transpose_int64_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int8sse4(src []uint64, dest []int8, transposeMap []int32) { + _transpose_uint64_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint8sse4(src []uint64, dest []uint8, transposeMap []int32) { + _transpose_uint64_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int16sse4(src []uint64, dest []int16, transposeMap []int32) { + _transpose_uint64_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint16sse4(src []uint64, dest []uint16, transposeMap []int32) { + _transpose_uint64_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int32sse4(src []uint64, dest []int32, transposeMap []int32) { + _transpose_uint64_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint32sse4(src []uint64, dest []uint32, transposeMap []int32) { + _transpose_uint64_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Int64sse4(src []uint64, dest []int64, transposeMap []int32) { + _transpose_uint64_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0])) +} + +//go:noescape +func _transpose_uint64_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer) + +func transposeUint64Uint64sse4(src []uint64, dest []uint64, transposeMap []int32) { + _transpose_uint64_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), 
unsafe.Pointer(&transposeMap[0])) +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.s new file mode 100644 index 00000000..ee5199a5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.s @@ -0,0 +1,3074 @@ +//+build !noasm !appengine +// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT + +TEXT ยท_transpose_uint8_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB0_1 + +LBB0_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB0_5 + +LBB0_1: + WORD $0xd285 // test edx, edx + JLE LBB0_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB0_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB0_3 + +LBB0_4: + RET + +TEXT ยท_transpose_int8_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB1_1 + +LBB1_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB1_5 + +LBB1_1: + WORD $0xd285 // test edx, edx + JLE LBB1_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB1_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB1_3 + +LBB1_4: + RET + +TEXT ยท_transpose_uint16_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ 
transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB2_1 + +LBB2_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB2_5 + +LBB2_1: + WORD $0xd285 // test edx, edx + JLE LBB2_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB2_3: + LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB2_3 + +LBB2_4: + RET + +TEXT ยท_transpose_int16_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB3_1 + +LBB3_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB3_5 + +LBB3_1: + WORD $0xd285 // test edx, edx + JLE LBB3_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB3_3: + LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB3_3 + +LBB3_4: + RET + +TEXT ยท_transpose_uint32_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB4_1 + +LBB4_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + WORD 
$0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB4_5 + +LBB4_1: + WORD $0xd285 // test edx, edx + JLE LBB4_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB4_3: + LONG $0x87048b42 // mov eax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB4_3 + +LBB4_4: + RET + +TEXT ยท_transpose_int32_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB5_1 + +LBB5_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB5_5 + +LBB5_1: + WORD $0xd285 // test edx, edx + JLE LBB5_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB5_3: + LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB5_3 + +LBB5_4: + RET + +TEXT ยท_transpose_uint64_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB6_1 + +LBB6_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB6_5 + +LBB6_1: + WORD $0xd285 // test edx, edx + JLE LBB6_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB6_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + 
LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB6_3 + +LBB6_4: + RET + +TEXT ยท_transpose_int64_uint8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB7_1 + +LBB7_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB7_5 + +LBB7_1: + WORD $0xd285 // test edx, edx + JLE LBB7_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB7_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB7_3 + +LBB7_4: + RET + +TEXT ยท_transpose_uint8_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB8_1 + +LBB8_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB8_5 + +LBB8_1: + WORD $0xd285 // test edx, edx + JLE LBB8_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB8_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB8_3 + +LBB8_4: + RET + +TEXT ยท_transpose_int8_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB9_1 + +LBB9_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b60f // movzx edx, byte [rcx + 
4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB9_5 + +LBB9_1: + WORD $0xd285 // test edx, edx + JLE LBB9_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB9_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB9_3 + +LBB9_4: + RET + +TEXT ยท_transpose_uint16_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB10_1 + +LBB10_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB10_5 + +LBB10_1: + WORD $0xd285 // test edx, edx + JLE LBB10_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB10_3: + LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB10_3 + +LBB10_4: + RET + +TEXT ยท_transpose_int16_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB11_1 + +LBB11_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB11_5 + +LBB11_1: + WORD 
$0xd285 // test edx, edx + JLE LBB11_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB11_3: + LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB11_3 + +LBB11_4: + RET + +TEXT ยท_transpose_uint32_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB12_1 + +LBB12_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB12_5 + +LBB12_1: + WORD $0xd285 // test edx, edx + JLE LBB12_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB12_3: + LONG $0x87048b42 // mov eax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB12_3 + +LBB12_4: + RET + +TEXT ยท_transpose_int32_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB13_1 + +LBB13_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB13_5 + +LBB13_1: + WORD $0xd285 // test edx, edx + JLE LBB13_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB13_3: + LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB13_3 + +LBB13_4: + RET + +TEXT ยท_transpose_uint64_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD 
$0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB14_1 + +LBB14_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB14_5 + +LBB14_1: + WORD $0xd285 // test edx, edx + JLE LBB14_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB14_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB14_3 + +LBB14_4: + RET + +TEXT ยท_transpose_int64_int8_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB15_1 + +LBB15_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x1688 // mov byte [rsi], dl + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx] + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x04c68348 // add rsi, 4 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB15_5 + +LBB15_1: + WORD $0xd285 // test edx, edx + JLE LBB15_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB15_3: + LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8] + LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax] + LONG $0x06048842 // mov byte [rsi + r8], al + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB15_3 + +LBB15_4: + RET + +TEXT ยท_transpose_uint8_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB16_1 + +LBB16_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx 
+ 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB16_5 + +LBB16_1: + WORD $0xd285 // test edx, edx + JLE LBB16_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB16_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB16_3 + +LBB16_4: + RET + +TEXT ยท_transpose_int8_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB17_1 + +LBB17_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB17_5 + +LBB17_1: + WORD $0xd285 // test edx, edx + JLE LBB17_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB17_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB17_3 + +LBB17_4: + RET + +TEXT ยท_transpose_uint16_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB18_1 + +LBB18_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB18_5 + +LBB18_1: + WORD $0xd285 // test edx, edx + JLE LBB18_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB18_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 
// add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB18_3 + +LBB18_4: + RET + +TEXT ยท_transpose_int16_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB19_1 + +LBB19_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB19_5 + +LBB19_1: + WORD $0xd285 // test edx, edx + JLE LBB19_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB19_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB19_3 + +LBB19_4: + RET + +TEXT ยท_transpose_uint32_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB20_1 + +LBB20_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB20_5 + +LBB20_1: + WORD $0xd285 // test edx, edx + JLE LBB20_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB20_3: + LONG $0x47048b42 // mov eax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB20_3 + +LBB20_4: + RET + +TEXT ยท_transpose_int32_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB21_1 + +LBB21_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x04576348 // movsxd rdx, 
dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB21_5 + +LBB21_1: + WORD $0xd285 // test edx, edx + JLE LBB21_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB21_3: + LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB21_3 + +LBB21_4: + RET + +TEXT ยท_transpose_uint64_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB22_1 + +LBB22_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB22_5 + +LBB22_1: + WORD $0xd285 // test edx, edx + JLE LBB22_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB22_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB22_3 + +LBB22_4: + RET + +TEXT ยท_transpose_int64_uint16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB23_1 + +LBB23_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB23_5 + +LBB23_1: + WORD $0xd285 // test edx, 
edx + JLE LBB23_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB23_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB23_3 + +LBB23_4: + RET + +TEXT ยท_transpose_uint8_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB24_1 + +LBB24_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB24_5 + +LBB24_1: + WORD $0xd285 // test edx, edx + JLE LBB24_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB24_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB24_3 + +LBB24_4: + RET + +TEXT ยท_transpose_int8_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB25_1 + +LBB25_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB25_5 + +LBB25_1: + WORD $0xd285 // test edx, edx + JLE LBB25_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB25_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB25_3 + +LBB25_4: + RET + +TEXT ยท_transpose_uint16_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), 
CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB26_1 + +LBB26_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB26_5 + +LBB26_1: + WORD $0xd285 // test edx, edx + JLE LBB26_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB26_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB26_3 + +LBB26_4: + RET + +TEXT ยท_transpose_int16_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB27_1 + +LBB27_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB27_5 + +LBB27_1: + WORD $0xd285 // test edx, edx + JLE LBB27_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB27_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB27_3 + +LBB27_4: + RET + +TEXT ยท_transpose_uint32_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB28_1 + +LBB28_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + WORD $0x578b; BYTE 
$0x0c // mov edx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB28_5 + +LBB28_1: + WORD $0xd285 // test edx, edx + JLE LBB28_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB28_3: + LONG $0x47048b42 // mov eax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB28_3 + +LBB28_4: + RET + +TEXT ยท_transpose_int32_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB29_1 + +LBB29_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB29_5 + +LBB29_1: + WORD $0xd285 // test edx, edx + JLE LBB29_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB29_3: + LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB29_3 + +LBB29_4: + RET + +TEXT ยท_transpose_uint64_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB30_1 + +LBB30_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB30_5 + +LBB30_1: + WORD $0xd285 // test edx, edx + JLE LBB30_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB30_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + 
r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB30_3 + +LBB30_4: + RET + +TEXT ·_transpose_int64_int16_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB31_1 + +LBB31_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + WORD $0x8966; BYTE $0x16 // mov word [rsi], dx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x02568966 // mov word [rsi + 2], dx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x04568966 // mov word [rsi + 4], dx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx] + LONG $0x06568966 // mov word [rsi + 6], dx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x08c68348 // add rsi, 8 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB31_5 + +LBB31_1: + WORD $0xd285 // test edx, edx + JLE LBB31_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB31_3: + LONG $0x87048b4a // mov rax, qword [rdi + 4*r8] + LONG $0x8104b70f // movzx eax, word [rcx + 4*rax] + LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB31_3 + +LBB31_4: + RET + +TEXT ·_transpose_uint8_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB32_1 + +LBB32_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB32_5 + +LBB32_1: + WORD $0xd285 // test edx, edx + JLE LBB32_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB32_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB32_3 + +LBB32_4: + RET + +TEXT ·_transpose_int8_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB33_1 + +LBB33_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG
$0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB33_5 + +LBB33_1: + WORD $0xd285 // test edx, edx + JLE LBB33_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB33_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB33_3 + +LBB33_4: + RET + +TEXT ·_transpose_uint16_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB34_1 + +LBB34_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB34_5 + +LBB34_1: + WORD $0xd285 // test edx, edx + JLE LBB34_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB34_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB34_3 + +LBB34_4: + RET + +TEXT ·_transpose_int16_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB35_1 + +LBB35_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD
$0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB35_5 + +LBB35_1: + WORD $0xd285 // test edx, edx + JLE LBB35_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB35_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB35_3 + +LBB35_4: + RET + +TEXT ·_transpose_uint32_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB36_1 + +LBB36_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB36_5 + +LBB36_1: + WORD $0xd285 // test edx, edx + JLE LBB36_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB36_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB36_3 + +LBB36_4: + RET + +TEXT ·_transpose_int32_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB37_1 + +LBB37_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB37_5 + +LBB37_1: + WORD $0xd285 // test edx, edx + JLE LBB37_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB37_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG
$0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB37_3 + +LBB37_4: + RET + +TEXT ·_transpose_uint64_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB38_1 + +LBB38_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB38_5 + +LBB38_1: + WORD $0xd285 // test edx, edx + JLE LBB38_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB38_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB38_3 + +LBB38_4: + RET + +TEXT ·_transpose_int64_uint32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB39_1 + +LBB39_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB39_5 + +LBB39_1: + WORD $0xd285 // test edx, edx + JLE LBB39_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB39_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB39_3 + +LBB39_4: + RET + +TEXT ·_transpose_uint8_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB40_1 + +LBB40_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx,
dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB40_5 + +LBB40_1: + WORD $0xd285 // test edx, edx + JLE LBB40_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB40_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB40_3 + +LBB40_4: + RET + +TEXT ·_transpose_int8_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB41_1 + +LBB41_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB41_5 + +LBB41_1: + WORD $0xd285 // test edx, edx + JLE LBB41_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB41_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x86048942 // mov dword [rsi + 4*r8], eax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB41_3 + +LBB41_4: + RET + +TEXT ·_transpose_uint16_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB42_1 + +LBB42_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx,
dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB42_5 + +LBB42_1: + WORD $0xd285 // test edx, edx + JLE LBB42_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB42_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB42_3 + +LBB42_4: + RET + +TEXT ·_transpose_int16_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB43_1 + +LBB43_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB43_5 + +LBB43_1: + WORD $0xd285 // test edx, edx + JLE LBB43_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB43_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x46048942 // mov dword [rsi + 2*r8], eax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB43_3 + +LBB43_4: + RET + +TEXT ·_transpose_uint32_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB44_1 + +LBB44_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB44_5 + +LBB44_1: + WORD $0xd285 // test edx, edx + JLE LBB44_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB44_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + WORD $0x048b;
BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB44_3 + +LBB44_4: + RET + +TEXT ·_transpose_int32_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB45_1 + +LBB45_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB45_5 + +LBB45_1: + WORD $0xd285 // test edx, edx + JLE LBB45_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB45_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB45_3 + +LBB45_4: + RET + +TEXT ·_transpose_uint64_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB46_1 + +LBB46_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB46_5 + +LBB46_1: + WORD $0xd285 // test edx, edx + JLE LBB46_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB46_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB46_3 + +LBB46_4: + RET + +TEXT ·_transpose_int64_int32_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB47_1 + +LBB47_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 //
mov rdx, qword [rdi] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x1689 // mov dword [rsi], edx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx] + WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x10c68348 // add rsi, 16 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB47_5 + +LBB47_1: + WORD $0xd285 // test edx, edx + JLE LBB47_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB47_3: + LONG $0x47048b4a // mov rax, qword [rdi + 2*r8] + WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax] + LONG $0x06048942 // mov dword [rsi + r8], eax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB47_3 + +LBB47_4: + RET + +TEXT ·_transpose_uint8_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB48_1 + +LBB48_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB48_5 + +LBB48_1: + WORD $0xd285 // test edx, edx + JLE LBB48_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB48_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB48_3 + +LBB48_4: + RET + +TEXT ·_transpose_int8_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB49_1 + +LBB49_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx +
4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB49_5 + +LBB49_1: + WORD $0xd285 // test edx, edx + JLE LBB49_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB49_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB49_3 + +LBB49_4: + RET + +TEXT ·_transpose_uint16_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB50_1 + +LBB50_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB50_5 + +LBB50_1: + WORD $0xd285 // test edx, edx + JLE LBB50_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB50_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB50_3 + +LBB50_4: + RET + +TEXT ·_transpose_int16_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB51_1 + +LBB51_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB51_5 + +LBB51_1: + WORD $0xd285 // test edx, edx + JLE LBB51_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB51_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB51_3 + +LBB51_4: + RET + +TEXT ·_transpose_uint32_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB52_1 + +LBB52_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB52_5 + +LBB52_1: + WORD $0xd285 // test edx, edx + JLE LBB52_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB52_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB52_3 + +LBB52_4: + RET + +TEXT ·_transpose_int32_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB53_1 + +LBB53_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB53_5 + +LBB53_1: + WORD $0xd285 // test edx, edx + JLE LBB53_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB53_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB53_3 + +LBB53_4: + RET + +TEXT ·_transpose_uint64_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB54_1 + +LBB54_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 //
mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB54_5 + +LBB54_1: + WORD $0xd285 // test edx, edx + JLE LBB54_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB54_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB54_3 + +LBB54_4: + RET + +TEXT ·_transpose_int64_uint64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB55_1 + +LBB55_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB55_5 + +LBB55_1: + WORD $0xd285 // test edx, edx + JLE LBB55_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB55_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB55_3 + +LBB55_4: + RET + +TEXT ·_transpose_uint8_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB56_1 + +LBB56_5: + WORD $0xd089 // mov eax, edx + WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0157b60f // movzx edx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0257b60f // movzx edx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0357b60f // movzx edx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB56_5 +
+LBB56_1: + WORD $0xd285 // test edx, edx + JLE LBB56_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB56_3: + LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB56_3 + +LBB56_4: + RET + +TEXT ·_transpose_int8_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB57_1 + +LBB57_5: + WORD $0xd089 // mov eax, edx + LONG $0x17be0f48 // movsx rdx, byte [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x04c78348 // add rdi, 4 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB57_5 + +LBB57_1: + WORD $0xd285 // test edx, edx + JLE LBB57_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB57_3: + LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0xc604894a // mov qword [rsi + 8*r8], rax + LONG $0x01c08349 // add r8, 1 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB57_3 + +LBB57_4: + RET + +TEXT ·_transpose_uint16_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB58_1 + +LBB58_5: + WORD $0xd089 // mov eax, edx + WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x0257b70f // movzx edx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x0457b70f // movzx edx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0657b70f // movzx edx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB58_5 + +LBB58_1: + WORD $0xd285 // test edx, edx + JLE LBB58_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB58_3: + LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB58_3 + +LBB58_4: + RET + +TEXT ·_transpose_int16_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ
dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB59_1 + +LBB59_5: + WORD $0xd089 // mov eax, edx + LONG $0x17bf0f48 // movsx rdx, word [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x08c78348 // add rdi, 8 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB59_5 + +LBB59_1: + WORD $0xd285 // test edx, edx + JLE LBB59_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB59_3: + LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x8604894a // mov qword [rsi + 4*r8], rax + LONG $0x02c08349 // add r8, 2 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB59_3 + +LBB59_4: + RET + +TEXT ·_transpose_uint32_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB60_1 + +LBB60_5: + WORD $0xd089 // mov eax, edx + WORD $0x178b // mov edx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB60_5 + +LBB60_1: + WORD $0xd285 // test edx, edx + JLE LBB60_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB60_3: + LONG $0x07048b42 // mov eax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB60_3 + +LBB60_4: + RET + +TEXT ·_transpose_int32_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB61_1 + +LBB61_5: + WORD $0xd089 // mov eax, edx + WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x04576348 // movsxd rdx, dword [rdi + 4] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x08576348 // movsxd rdx, dword [rdi + 8] + LONG $0x91146348
// movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x0c576348 // movsxd rdx, dword [rdi + 12] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x10c78348 // add rdi, 16 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB61_5 + +LBB61_1: + WORD $0xd285 // test edx, edx + JLE LBB61_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB61_3: + LONG $0x0704634a // movsxd rax, dword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x4604894a // mov qword [rsi + 2*r8], rax + LONG $0x04c08349 // add r8, 4 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB61_3 + +LBB61_4: + RET + +TEXT ·_transpose_uint64_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB62_1 + +LBB62_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB62_5 + +LBB62_1: + WORD $0xd285 // test edx, edx + JLE LBB62_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB62_3: + LONG $0x07048b4a // mov rax, qword [rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB62_3 + +LBB62_4: + RET + +TEXT ·_transpose_int64_int64_sse4(SB), $0-32 + + MOVQ src+0(FP), DI + MOVQ dest+8(FP), SI + MOVQ length+16(FP), DX + MOVQ transposeMap+24(FP), CX + + WORD $0xfa83; BYTE $0x04 // cmp edx, 4 + JL LBB63_1 + +LBB63_5: + WORD $0xd089 // mov eax, edx + WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx + LONG $0x08578b48 // mov rdx, qword [rdi + 8] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x08568948 // mov qword [rsi + 8], rdx + LONG $0x10578b48 // mov rdx, qword [rdi + 16] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x10568948 // mov qword [rsi + 16], rdx + LONG $0x18578b48 // mov rdx, qword [rdi + 24] + LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx] + LONG $0x18568948 // mov qword [rsi + 24], rdx + WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4] + LONG $0x20c78348 // add rdi, 32 + LONG $0x20c68348 // add rsi, 32 + WORD $0xf883; BYTE $0x07 // cmp eax, 7 + JG LBB63_5 + +LBB63_1: + WORD $0xd285 // test edx, edx + JLE LBB63_4 + WORD $0xc283; BYTE $0x01 // add edx, 1 + WORD $0x3145; BYTE $0xc0 // xor r8d, r8d + +LBB63_3: + LONG $0x07048b4a // mov rax, qword
[rdi + r8] + LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax] + LONG $0x0604894a // mov qword [rsi + r8], rax + LONG $0x08c08349 // add r8, 8 + WORD $0xc283; BYTE $0xff // add edx, -1 + WORD $0xfa83; BYTE $0x01 // cmp edx, 1 + JG LBB63_3 + +LBB63_4: + RET diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore b/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore new file mode 100644 index 00000000..4120c511 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go new file mode 100644 index 00000000..864cde26 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
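(A note on the transpose kernels that end above: each TEXT ·_transpose_<src>_<dst>_sse4 symbol is the assembly half of a pair — a Go file in the same generated package declares a bodyless stub plus a slice-based wrapper. The sketch below is a hedged illustration, not code from this diff: the package name, wrapper name, and build tag are assumptions; only the $0-32 frame layout — four pointer-sized arguments src, dest, length, transposeMap — is taken from the assembly itself.)

//go:build !noasm

package utils // hypothetical package name for the generated kernels

import "unsafe"

// Bodyless declaration: the implementation is the matching
// TEXT ·_transpose_int64_int16_sse4 symbol in the .s file above.
//
//go:noescape
func _transpose_int64_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)

// TransposeInt64Int16 maps each src element through transposeMap and
// narrows it to int16, mirroring the assembly's 4-wide unrolled loop
// plus scalar tail. dest must be at least len(src) elements long.
func TransposeInt64Int16(src []int64, dest []int16, transposeMap []int32) {
	if len(src) == 0 {
		return
	}
	_transpose_int64_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]),
		len(src), unsafe.Pointer(&transposeMap[0]))
}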
+ +package compress + +import ( + "bytes" + "io" + + "github.com/andybalholm/brotli" + "github.com/apache/arrow/go/v12/parquet/internal/debug" +) + +type brotliCodec struct{} + +func (brotliCodec) NewReader(r io.Reader) io.ReadCloser { + return io.NopCloser(brotli.NewReader(r)) +} + +func (b brotliCodec) EncodeLevel(dst, src []byte, level int) []byte { + if level == DefaultCompressionLevel { + level = brotli.DefaultCompression + } + + maxlen := int(b.CompressBound(int64(len(src)))) + if dst == nil || cap(dst) < maxlen { + dst = make([]byte, 0, maxlen) + } + buf := bytes.NewBuffer(dst[:0]) + w := brotli.NewWriterLevel(buf, level) + _, err := w.Write(src) + if err != nil { + panic(err) + } + if err := w.Close(); err != nil { + panic(err) + } + return buf.Bytes() +} + +func (b brotliCodec) Encode(dst, src []byte) []byte { + return b.EncodeLevel(dst, src, brotli.DefaultCompression) +} + +func (brotliCodec) Decode(dst, src []byte) []byte { + rdr := brotli.NewReader(bytes.NewReader(src)) + if dst != nil { + var ( + sofar = 0 + n = -1 + err error = nil + ) + for n != 0 && err == nil { + n, err = rdr.Read(dst[sofar:]) + sofar += n + } + if err != nil && err != io.EOF { + panic(err) + } + return dst[:sofar] + } + + dst, err := io.ReadAll(rdr) + if err != nil { + panic(err) + } + + return dst +} + +// taken from brotli/enc/encode.c:1426 +// BrotliEncoderMaxCompressedSize +func (brotliCodec) CompressBound(len int64) int64 { + // [window bits / empty metadata] + N * [uncompressed] + [last empty] + debug.Assert(len > 0, "brotli compressbound should be > 0") + nlarge := len >> 14 + overhead := 2 + (4 * nlarge) + 3 + 1 + result := len + overhead + if len == 0 { + return 2 + } + if result < len { + return 0 + } + return result +} + +func (brotliCodec) NewWriter(w io.Writer) io.WriteCloser { + return brotli.NewWriter(w) +} + +func (brotliCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { + if level == DefaultCompressionLevel { + level = brotli.DefaultCompression + } + return brotli.NewWriterLevel(w, level), nil +} + +func init() { + codecs[Codecs.Brotli] = brotliCodec{} +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go new file mode 100644 index 00000000..6bc0f1eb --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go @@ -0,0 +1,155 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compress contains the interfaces and implementations for handling compression/decompression +// of parquet data at the column levels.
+package compress + +import ( + "compress/flate" + "fmt" + "io" + + "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet" +) + +// Compression is an alias to the thrift compression codec enum type for easy use +type Compression parquet.CompressionCodec + +func (c Compression) String() string { + return parquet.CompressionCodec(c).String() +} + +// DefaultCompressionLevel will use flate.DefaultCompression since many of the compression libraries +// use that to denote "use the default". +const DefaultCompressionLevel = flate.DefaultCompression + +// Codecs is a useful struct to provide namespaced enum values to use for specifying the compression type to use +// which make for easy internal swapping between them and the thrift enum since they are initialized to the same +// constant values. +var Codecs = struct { + Uncompressed Compression + Snappy Compression + Gzip Compression + // LZO is unsupported in this library since LZO license is incompatible with Apache License + Lzo Compression + Brotli Compression + // LZ4 unsupported in this library due to problematic issues between the Hadoop LZ4 spec vs regular lz4 + // see: http://mail-archives.apache.org/mod_mbox/arrow-dev/202007.mbox/%3CCAAri41v24xuA8MGHLDvgSnE+7AAgOhiEukemW_oPNHMvfMmrWw@mail.gmail.com%3E + Lz4 Compression + Zstd Compression +}{ + Uncompressed: Compression(parquet.CompressionCodec_UNCOMPRESSED), + Snappy: Compression(parquet.CompressionCodec_SNAPPY), + Gzip: Compression(parquet.CompressionCodec_GZIP), + Lzo: Compression(parquet.CompressionCodec_LZO), + Brotli: Compression(parquet.CompressionCodec_BROTLI), + Lz4: Compression(parquet.CompressionCodec_LZ4), + Zstd: Compression(parquet.CompressionCodec_ZSTD), +} + +// Codec is an interface which is implemented for each compression type in order to make the interactions easy to +// implement. Most consumers won't be calling GetCodec directly. +type Codec interface { + // NewReader provides a reader that wraps a stream with compressed data to stream the uncompressed data + NewReader(io.Reader) io.ReadCloser + // NewWriter provides a wrapper around a write stream to compress data before writing it. + NewWriter(io.Writer) io.WriteCloser + // NewWriterLevel is like NewWriter but allows specifying the compression level + NewWriterLevel(io.Writer, int) (io.WriteCloser, error) + // Encode encodes a block of data given by src and returns the compressed block. dst should be either nil + // or sized large enough to fit the compressed block (use CompressBound to allocate). dst and src should not + // overlap since some of the compression types don't allow it. + // + // The returned slice will be one of the following: + // 1. If dst was nil or dst was too small to fit the compressed data, it will be a newly allocated slice + // 2. If dst was large enough to fit the compressed data (depending on the compression algorithm it might + // be required to be at least CompressBound length) then it might be a slice of dst. + Encode(dst, src []byte) []byte + // EncodeLevel is like Encode, but specifies a particular encoding level instead of the default. + EncodeLevel(dst, src []byte, level int) []byte + // CompressBound returns the boundary of maximum size of compressed data under the chosen codec. + CompressBound(int64) int64 + // Decode is for decoding a single block rather than a stream, like with Encode, dst must be either nil or + // sized large enough to accommodate the uncompressed data and should not overlap with src. + // + // the returned slice *might* be a slice of dst. 
+ Decode(dst, src []byte) []byte +} + +var codecs = map[Compression]Codec{} + +type nocodec struct{} + +func (nocodec) NewReader(r io.Reader) io.ReadCloser { + ret, ok := r.(io.ReadCloser) + if !ok { + return io.NopCloser(r) + } + return ret +} + +func (nocodec) Decode(dst, src []byte) []byte { + if dst != nil { + copy(dst, src) + } + return dst +} + +type writerNopCloser struct { + io.Writer +} + +func (writerNopCloser) Close() error { + return nil +} + +func (nocodec) Encode(dst, src []byte) []byte { + copy(dst, src) + return dst +} + +func (nocodec) EncodeLevel(dst, src []byte, _ int) []byte { + copy(dst, src) + return dst +} + +func (nocodec) NewWriter(w io.Writer) io.WriteCloser { + ret, ok := w.(io.WriteCloser) + if !ok { + return writerNopCloser{w} + } + return ret +} + +func (n nocodec) NewWriterLevel(w io.Writer, _ int) (io.WriteCloser, error) { + return n.NewWriter(w), nil +} + +func (nocodec) CompressBound(len int64) int64 { return len } + +func init() { + codecs[Codecs.Uncompressed] = nocodec{} +} + +// GetCodec returns a Codec interface for the requested Compression type +func GetCodec(typ Compression) (Codec, error) { + ret, ok := codecs[typ] + if !ok { + return nil, fmt.Errorf("compression for %s unimplemented", typ.String()) + } + return ret, nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go new file mode 100644 index 00000000..31f1729e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
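(To make the Codec contract from compress.go above concrete before the per-codec files that follow, here is a minimal round-trip sketch. It uses only the APIs defined above — GetCodec, Codecs, CompressBound, Encode, Decode; the payload value is made up.)

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/parquet/compress"
)

func main() {
	codec, err := compress.GetCodec(compress.Codecs.Gzip)
	if err != nil {
		panic(err)
	}
	src := []byte("abcabcabcabcabcabcabcabc") // made-up payload
	// Pre-sizing dst via CompressBound lets Encode reuse the buffer
	// instead of allocating; passing nil would also work.
	dst := make([]byte, 0, codec.CompressBound(int64(len(src))))
	compressed := codec.Encode(dst, src)
	// A nil dst asks Decode to allocate the uncompressed buffer itself.
	roundTrip := codec.Decode(nil, compressed)
	fmt.Println(len(compressed), string(roundTrip) == string(src))
}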
+ +package compress + +import ( + "bytes" + "fmt" + "io" + + "github.com/klauspost/compress/gzip" +) + +type gzipCodec struct{} + +func (gzipCodec) NewReader(r io.Reader) io.ReadCloser { + ret, err := gzip.NewReader(r) + if err != nil { + panic(fmt.Errorf("codec: gzip: %w", err)) + } + return ret +} + +func (gzipCodec) Decode(dst, src []byte) []byte { + rdr, err := gzip.NewReader(bytes.NewReader(src)) + if err != nil { + panic(err) + } + + if dst != nil { + n, err := io.ReadFull(rdr, dst) + if err != nil { + panic(err) + } + return dst[:n] + } + + dst, err = io.ReadAll(rdr) + if err != nil { + panic(err) + } + + return dst +} + +func (g gzipCodec) EncodeLevel(dst, src []byte, level int) []byte { + maxlen := int(g.CompressBound(int64(len(src)))) + if dst == nil || cap(dst) < maxlen { + dst = make([]byte, 0, maxlen) + } + buf := bytes.NewBuffer(dst[:0]) + w, err := gzip.NewWriterLevel(buf, level) + if err != nil { + panic(err) + } + _, err = w.Write(src) + if err != nil { + panic(err) + } + if err := w.Close(); err != nil { + panic(err) + } + return buf.Bytes() +} + +func (g gzipCodec) Encode(dst, src []byte) []byte { + return g.EncodeLevel(dst, src, DefaultCompressionLevel) +} + +func (gzipCodec) CompressBound(len int64) int64 { + return len + ((len + 7) >> 3) + ((len + 63) >> 6) + 5 +} + +func (gzipCodec) NewWriter(w io.Writer) io.WriteCloser { + return gzip.NewWriter(w) +} + +func (gzipCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { + return gzip.NewWriterLevel(w, level) +} + +func init() { + codecs[Codecs.Gzip] = gzipCodec{} +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go new file mode 100644 index 00000000..b7fa1142 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compress + +import ( + "io" + + "github.com/golang/snappy" +) + +type snappyCodec struct{} + +func (snappyCodec) Encode(dst, src []byte) []byte { + return snappy.Encode(dst, src) +} + +func (snappyCodec) EncodeLevel(dst, src []byte, _ int) []byte { + return snappy.Encode(dst, src) +} + +func (snappyCodec) Decode(dst, src []byte) []byte { + dst, err := snappy.Decode(dst, src) + if err != nil { + panic(err) + } + return dst +} + +func (snappyCodec) NewReader(r io.Reader) io.ReadCloser { + return io.NopCloser(snappy.NewReader(r)) +} + +func (snappyCodec) CompressBound(len int64) int64 { + return int64(snappy.MaxEncodedLen(int(len))) +} + +func (snappyCodec) NewWriter(w io.Writer) io.WriteCloser { + return snappy.NewBufferedWriter(w) +} + +func (s snappyCodec) NewWriterLevel(w io.Writer, _ int) (io.WriteCloser, error) { + return s.NewWriter(w), nil +} + +func init() { + codecs[Codecs.Snappy] = snappyCodec{} +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go new file mode 100644 index 00000000..5db24f04 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compress + +import ( + "io" + "sync" + + "github.com/apache/arrow/go/v12/parquet/internal/debug" + "github.com/klauspost/compress/zstd" +) + +type zstdCodec struct{} + +type zstdcloser struct { + *zstd.Decoder +} + +var ( + enc *zstd.Encoder + dec *zstd.Decoder + initEncoder sync.Once + initDecoder sync.Once +) + +func getencoder() *zstd.Encoder { + initEncoder.Do(func() { + enc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) + }) + return enc +} + +func getdecoder() *zstd.Decoder { + initDecoder.Do(func() { + dec, _ = zstd.NewReader(nil) + }) + return dec +} + +func (zstdCodec) Decode(dst, src []byte) []byte { + dst, err := getdecoder().DecodeAll(src, dst[:0]) + if err != nil { + panic(err) + } + return dst +} + +func (z *zstdcloser) Close() error { + z.Decoder.Close() + return nil +} + +func (zstdCodec) NewReader(r io.Reader) io.ReadCloser { + ret, _ := zstd.NewReader(r) + return &zstdcloser{ret} +} + +func (zstdCodec) NewWriter(w io.Writer) io.WriteCloser { + ret, _ := zstd.NewWriter(w) + return ret +} + +func (zstdCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { + var compressLevel zstd.EncoderLevel + if level == DefaultCompressionLevel { + compressLevel = zstd.SpeedDefault + } else { + compressLevel = zstd.EncoderLevelFromZstd(level) + } + return zstd.NewWriter(w, zstd.WithEncoderLevel(compressLevel)) +} + +func (z zstdCodec) Encode(dst, src []byte) []byte { + return getencoder().EncodeAll(src, dst[:0]) +} + +func (z zstdCodec) EncodeLevel(dst, src []byte, level int) []byte { + compressLevel := zstd.EncoderLevelFromZstd(level) + if level == DefaultCompressionLevel { + compressLevel = zstd.SpeedDefault + } + enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true), zstd.WithEncoderLevel(compressLevel)) + return enc.EncodeAll(src, dst[:0]) +} + +// from zstd.h, ZSTD_COMPRESSBOUND +func (zstdCodec) CompressBound(len int64) int64 { + debug.Assert(len > 0, "len for zstd CompressBound should be > 0") + extra := ((128 << 10) - len) >> 11 + if len >= (128 << 10) { + extra = 0 + } + return len + (len >> 8) + extra +} + +func init() { + codecs[Codecs.Zstd] = zstdCodec{} +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/doc.go b/vendor/github.com/apache/arrow/go/v12/parquet/doc.go new file mode 100644 index 00000000..c3875996 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/doc.go @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parquet provides an implementation of Apache Parquet for Go. +// +// Apache Parquet is an open-source columnar data storage format using the record +// shredding and assembly algorithm to accommodate complex data structures which +// can then be used to efficiently store the data. 
+// +// While the go.mod states go1.18, everything here should be compatible +// with go versions 1.17 and 1.16. +// +// This implementation is a native go implementation for reading and writing the +// parquet file format. +// +// Install +// +// You can download the library and cli utilities via: +// go get -u github.com/apache/arrow/go/v12/parquet +// go install github.com/apache/arrow/go/v12/parquet/cmd/parquet_reader@latest +// go install github.com/apache/arrow/go/v12/parquet/cmd/parquet_schema@latest +// +// Modules +// +// This top level parquet package contains the basic common types and reader/writer +// properties along with some utilities that are used throughout the other modules. +// +// The file module contains the functions for directly reading/writing parquet files +// including Column Readers and Column Writers. +// +// The metadata module contains the types for managing the lower level file/rowgroup/column +// metadata inside of a ParquetFile including inspecting the statistics. +// +// The pqarrow module contains helper functions and types for converting directly +// between Parquet and Apache Arrow formats. +// +// The schema module contains the types for manipulating / inspecting / creating +// parquet file schemas. +// +// Primitive Types +// +// The Parquet Primitive Types and their corresponding Go types are Boolean (bool), +// Int32 (int32), Int64 (int64), Int96 (parquet.Int96), Float (float32), Double (float64), +// ByteArray (parquet.ByteArray) and FixedLenByteArray (parquet.FixedLenByteArray). +// +// Encodings +// +// The encoding types supported in this package are: +// Plain, Plain/RLE Dictionary, Delta Binary Packed (only integer types), Delta Byte Array +// (only ByteArray), Delta Length Byte Array (only ByteArray) +// +// Tip: Some platforms don't necessarily support all kinds of encodings. If you're not +// sure what to use, just use Plain and Dictionary encoding. +package parquet + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Version -linecomment +//go:generate thrift -o internal -r --gen go ../../cpp/src/parquet/parquet.thrift diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go new file mode 100644 index 00000000..660ee786 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go @@ -0,0 +1,711 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
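For orientation, the module overview above maps to concrete entry points; a hedged sketch of opening a file through the `file` module follows. `OpenParquetFile` and the metadata accessors are assumed from the upstream v12 API (they are not shown in this diff), and `example.parquet` is a placeholder path:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/parquet/file"
)

func main() {
	// Open a local parquet file without memory mapping.
	rdr, err := file.OpenParquetFile("example.parquet", false)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	// Row group count and schema come from the file metadata.
	fmt.Println("row groups:", rdr.NumRowGroups())
	fmt.Println("columns:", rdr.MetaData().Schema.NumColumns())
}
```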
+
+package parquet
+
+import (
+	"crypto/rand"
+	"unicode/utf8"
+
+	format "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet"
+)
+
+// Constants that will be used as the default values with encryption/decryption
+const (
+	// By default we'll use AesGCM as our encryption algorithm
+	DefaultEncryptionAlgorithm = AesGcm
+	MaximalAadMetadataLength int32 = 256
+	// if encryption is turned on, we will default to also encrypting the footer
+	DefaultEncryptedFooter = true
+	DefaultCheckSignature = true
+	// by default if you set the file decryption properties, we will error
+	// on any plaintext files unless otherwise specified.
+	DefaultAllowPlaintextFiles = false
+	AadFileUniqueLength int32 = 8
+)
+
+// ColumnPathToDecryptionPropsMap maps column paths to decryption properties
+type ColumnPathToDecryptionPropsMap map[string]*ColumnDecryptionProperties
+
+// ColumnPathToEncryptionPropsMap maps column paths to encryption properties
+type ColumnPathToEncryptionPropsMap map[string]*ColumnEncryptionProperties
+
+// ColumnEncryptionProperties specifies how to encrypt a given column
+type ColumnEncryptionProperties struct {
+	columnPath             string
+	encrypted              bool
+	encryptedWithFooterKey bool
+	key                    string
+	keyMetadata            string
+	utilized               bool
+}
+
+// ColumnPath returns which column these properties are for
+func (ce *ColumnEncryptionProperties) ColumnPath() string {
+	return ce.columnPath
+}
+
+// IsEncrypted returns true if this column is encrypted.
+func (ce *ColumnEncryptionProperties) IsEncrypted() bool { return ce.encrypted }
+
+// IsEncryptedWithFooterKey returns if this column was encrypted with the footer key itself, or false if a separate
+// key was used for encrypting this column.
+func (ce *ColumnEncryptionProperties) IsEncryptedWithFooterKey() bool {
+	return ce.encryptedWithFooterKey
+}
+
+// Key returns the key used for encrypting this column if it isn't encrypted by the footer key
+func (ce *ColumnEncryptionProperties) Key() string { return ce.key }
+
+// KeyMetadata returns the key identifier which is used with a KeyRetriever to get the key for this column if it is not
+// encrypted using the footer key
+func (ce *ColumnEncryptionProperties) KeyMetadata() string { return ce.keyMetadata }
+
+// WipeOutEncryptionKey clears the encryption key, used after completion of file writing
+func (ce *ColumnEncryptionProperties) WipeOutEncryptionKey() { ce.key = "" }
+
+// IsUtilized returns whether or not these properties have already been used; if the key is empty
+// then this is always false
+func (ce *ColumnEncryptionProperties) IsUtilized() bool {
+	if ce.key == "" {
+		return false
+	}
+	return ce.utilized
+}
+
+// SetUtilized is used for marking it as utilized once it is used in FileEncryptionProperties
+// as the encryption key will be wiped out on completion of writing
+func (ce *ColumnEncryptionProperties) SetUtilized() {
+	ce.utilized = true
+}
+
+// Clone returns an instance of ColumnEncryptionProperties with the same key and metadata
+func (ce *ColumnEncryptionProperties) Clone() *ColumnEncryptionProperties {
+	copy := ce.key
+	return NewColumnEncryptionProperties(ce.columnPath, WithKey(copy), WithKeyMetadata(ce.keyMetadata))
+}
+
+type colEncryptConfig struct {
+	key         string
+	keyMetadata string
+	encrypted   bool
+}
+
+// ColumnEncryptOption is how to specify options to the NewColumnEncryptionProperties function.
+type ColumnEncryptOption func(*colEncryptConfig)
+
+// WithKey sets a column specific key.
+// If key is not set on an encrypted column, the column will be encrypted with the footer key.
+// The key length must be either 16, 24, or 32 bytes.
+// The key is cloned and will be wiped out (array values set to 0) upon completion of file writing.
+// The caller is responsible for wiping out the input key array.
+func WithKey(key string) ColumnEncryptOption {
+	return func(c *colEncryptConfig) {
+		if key != "" {
+			c.key = key
+		}
+	}
+}
+
+// WithKeyMetadata sets the key retrieval metadata; use either KeyMetadata or KeyID, but not both
+func WithKeyMetadata(keyMeta string) ColumnEncryptOption {
+	return func(c *colEncryptConfig) {
+		c.keyMetadata = keyMeta
+	}
+}
+
+// WithKeyID is a convenience function to set the key metadata using a string id.
+// It sets the key retrieval metadata (converted from a string); use either KeyMetadata or KeyID, not both.
+// KeyID will be converted to metadata (UTF-8 array)
+func WithKeyID(keyID string) ColumnEncryptOption {
+	if !utf8.ValidString(keyID) {
+		panic("parquet: key id should be UTF8 encoded")
+	}
+	return WithKeyMetadata(keyID)
+}
+
+// NewColumnEncryptionProperties constructs properties for the provided column path, modified by the options provided
+func NewColumnEncryptionProperties(name string, opts ...ColumnEncryptOption) *ColumnEncryptionProperties {
+	var cfg colEncryptConfig
+	cfg.encrypted = true
+	for _, o := range opts {
+		o(&cfg)
+	}
+	return &ColumnEncryptionProperties{
+		utilized:               false,
+		encrypted:              cfg.encrypted,
+		encryptedWithFooterKey: cfg.encrypted && cfg.key == "",
+		keyMetadata:            cfg.keyMetadata,
+		key:                    cfg.key,
+		columnPath:             name,
+	}
+}
+
+// ColumnDecryptionProperties are the specifications for how to decrypt a given column.
+type ColumnDecryptionProperties struct {
+	columnPath string
+	key        string
+	utilized   bool
+}
+
+// NewColumnDecryptionProperties constructs a new ColumnDecryptionProperties for the given column path, modified by
+// the provided options
+func NewColumnDecryptionProperties(column string, opts ...ColumnDecryptOption) *ColumnDecryptionProperties {
+	var cfg columnDecryptConfig
+	for _, o := range opts {
+		o(&cfg)
+	}
+
+	return &ColumnDecryptionProperties{
+		columnPath: column,
+		utilized:   false,
+		key:        cfg.key,
+	}
+}
+
+// ColumnPath returns which column these properties describe how to decrypt
+func (cd *ColumnDecryptionProperties) ColumnPath() string { return cd.columnPath }
+
+// Key returns the key specified to decrypt this column, or is empty if the Footer Key should be used.
+func (cd *ColumnDecryptionProperties) Key() string { return cd.key }
+
+// IsUtilized returns whether or not these properties have been used for decryption already
+func (cd *ColumnDecryptionProperties) IsUtilized() bool { return cd.utilized }
+
+// SetUtilized is used by the reader to specify when we've decrypted the column and have used the key so we know
+// to wipe out the keys.
+func (cd *ColumnDecryptionProperties) SetUtilized() { cd.utilized = true }
+
+// WipeOutDecryptionKey is called after decryption to ensure the key doesn't stick around and get re-used.
+func (cd *ColumnDecryptionProperties) WipeOutDecryptionKey() { cd.key = "" }
+
+// Clone returns a new instance of ColumnDecryptionProperties with the same key and column
+func (cd *ColumnDecryptionProperties) Clone() *ColumnDecryptionProperties {
+	return NewColumnDecryptionProperties(cd.columnPath, WithDecryptKey(cd.key))
+}
+
+type columnDecryptConfig struct {
+	key string
+}
+
+// ColumnDecryptOption is the type of the options passed for constructing Decryption Properties
+type ColumnDecryptOption func(*columnDecryptConfig)
+
+// WithDecryptKey specifies the key to utilize for decryption
+func WithDecryptKey(key string) ColumnDecryptOption {
+	return func(cfg *columnDecryptConfig) {
+		if key != "" {
+			cfg.key = key
+		}
+	}
+}
+
+// AADPrefixVerifier is an interface for any object that can be used to verify the identity of the file being decrypted.
+// It should panic if the provided AAD identity is bad.
+//
+// In a data set, AAD Prefixes should be collected, and then checked for missing files.
+type AADPrefixVerifier interface {
+	// Verify the identity of the file; panics if bad.
+	Verify(string)
+}
+
+// DecryptionKeyRetriever is an interface for getting the desired key for decryption from metadata. It should take in
+// some metadata identifier and return the actual Key to use for decryption.
+type DecryptionKeyRetriever interface {
+	GetKey(keyMetadata []byte) string
+}
+
+// FileDecryptionProperties defines the file-level configuration for decrypting a parquet file. Once constructed they are
+// read only.
+type FileDecryptionProperties struct {
+	footerKey                     string
+	aadPrefix                     string
+	checkPlaintextFooterIntegrity bool
+	plaintextAllowed              bool
+	utilized                      bool
+	columnDecryptProps            ColumnPathToDecryptionPropsMap
+	Verifier                      AADPrefixVerifier
+	KeyRetriever                  DecryptionKeyRetriever
+}
+
+// NewFileDecryptionProperties takes in the options for constructing a new FileDecryptionProperties object; if no options
+// are provided it will use the default configuration, which checks the footer integrity of a plaintext footer for an
+// encrypted file. For unencrypted parquet files, the decryption properties should not be set.
+func NewFileDecryptionProperties(opts ...FileDecryptionOption) *FileDecryptionProperties {
+	var cfg fileDecryptConfig
+	cfg.checkFooterIntegrity = DefaultCheckSignature
+	cfg.plaintextAllowed = DefaultAllowPlaintextFiles
+	for _, o := range opts {
+		o(&cfg)
+	}
+	return &FileDecryptionProperties{
+		Verifier:                      cfg.verifier,
+		footerKey:                     cfg.footerKey,
+		checkPlaintextFooterIntegrity: cfg.checkFooterIntegrity,
+		KeyRetriever:                  cfg.retriever,
+		aadPrefix:                     cfg.aadPrefix,
+		columnDecryptProps:            cfg.colDecrypt,
+		plaintextAllowed:              cfg.plaintextAllowed,
+		utilized:                      false,
+	}
+}
+
+// ColumnKey returns the key to be used for decrypting the provided column.
+func (fd *FileDecryptionProperties) ColumnKey(path string) string {
+	if d, ok := fd.columnDecryptProps[path]; ok {
+		if d != nil {
+			return d.Key()
+		}
+	}
+	return ""
+}
+
+// FooterKey returns the key utilized for decrypting the Footer if encrypted and any columns that are encrypted with
+// the footer key.
+func (fd *FileDecryptionProperties) FooterKey() string { return fd.footerKey }
+
+// AadPrefix returns the prefix to be supplied for constructing the identification strings when decrypting
+func (fd *FileDecryptionProperties) AadPrefix() string { return fd.aadPrefix }
+
+// PlaintextFooterIntegrity returns whether or not an integrity check will be performed on a plaintext footer for an
+// encrypted file.
+func (fd *FileDecryptionProperties) PlaintextFooterIntegrity() bool {
+	return fd.checkPlaintextFooterIntegrity
+}
+
+// PlaintextFilesAllowed returns whether or not this instance of decryption properties is allowed on a plaintext file.
+func (fd *FileDecryptionProperties) PlaintextFilesAllowed() bool { return fd.plaintextAllowed }
+
+// SetUtilized is called to mark this instance as utilized once it is used to read a file. A single instance
+// can be used for reading one file only. Setting this ensures the keys will be wiped out upon completion of file reading.
+func (fd *FileDecryptionProperties) SetUtilized() { fd.utilized = true }
+
+// IsUtilized returns whether or not this instance has been used to decrypt a file. If the footer key and prefix are
+// empty and there are no column decryption properties, then this is always false.
+func (fd *FileDecryptionProperties) IsUtilized() bool {
+	if fd.footerKey == "" && len(fd.columnDecryptProps) == 0 && fd.aadPrefix == "" {
+		return false
+	}
+	return fd.utilized
+}
+
+// WipeOutDecryptionKeys will clear all the keys for this instance including the column level ones; this will be called
+// after this instance has been utilized.
+func (fd *FileDecryptionProperties) WipeOutDecryptionKeys() {
+	fd.footerKey = ""
+	for _, cd := range fd.columnDecryptProps {
+		cd.WipeOutDecryptionKey()
+	}
+}
+
+// Clone returns a new instance of these properties, changing the prefix if set (keeping the same prefix if left empty)
+func (fd *FileDecryptionProperties) Clone(newAadPrefix string) *FileDecryptionProperties {
+	keyCopy := fd.footerKey
+	colDecryptMapCopy := make(ColumnPathToDecryptionPropsMap)
+	for k, v := range fd.columnDecryptProps {
+		colDecryptMapCopy[k] = v.Clone()
+	}
+	if newAadPrefix == "" {
+		newAadPrefix = fd.aadPrefix
+	}
+	return &FileDecryptionProperties{
+		footerKey:                     keyCopy,
+		KeyRetriever:                  fd.KeyRetriever,
+		checkPlaintextFooterIntegrity: fd.checkPlaintextFooterIntegrity,
+		Verifier:                      fd.Verifier,
+		columnDecryptProps:            colDecryptMapCopy,
+		aadPrefix:                     newAadPrefix,
+		plaintextAllowed:              fd.plaintextAllowed,
+		utilized:                      false,
+	}
+}
+
+type fileDecryptConfig struct {
+	footerKey            string
+	aadPrefix            string
+	verifier             AADPrefixVerifier
+	colDecrypt           ColumnPathToDecryptionPropsMap
+	retriever            DecryptionKeyRetriever
+	checkFooterIntegrity bool
+	plaintextAllowed     bool
+}
+
+// FileDecryptionOption is how to supply options to constructing a new FileDecryptionProperties instance.
+type FileDecryptionOption func(*fileDecryptConfig)
+
+// WithFooterKey sets an explicit footer key. If applied on a file that contains footer key
+// metadata, the metadata will be ignored and the footer will be decrypted/verified with this key.
+//
+// If the explicit key is not set, the footer key will be fetched from the key retriever.
+// With explicit keys or an AAD prefix, a new encryption properties object must be created for each
+// encrypted file.
+//
+// Explicit encryption keys (footer and column) are cloned.
+// Upon completion of file reading, the cloned encryption keys in the properties will be wiped out.
+// The caller is responsible for wiping out the input key array.
+// The footer key length must be either 16, 24, or 32 bytes.
+func WithFooterKey(key string) FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		if key != "" {
+			cfg.footerKey = key
+		}
+	}
+}
+
+// WithPrefixVerifier supplies a verifier object to use for verifying the AAD Prefixes stored in the file.
+func WithPrefixVerifier(verifier AADPrefixVerifier) FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		if verifier != nil {
+			cfg.verifier = verifier
+		}
+	}
+}
+
+// WithColumnKeys sets explicit column keys.
+//
+// It's also possible to set a key retriever on this property object.
+//
+// Upon file decryption, availability of explicit keys is checked before invocation
+// of the retriever callback.
+//
+// If an explicit key is available for a footer or a column, its key metadata will be ignored.
+func WithColumnKeys(decrypt ColumnPathToDecryptionPropsMap) FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		if len(decrypt) == 0 {
+			return
+		}
+		if len(cfg.colDecrypt) != 0 {
+			panic("column properties already set")
+		}
+		for _, v := range decrypt {
+			if v.IsUtilized() {
+				panic("parquet: column properties utilized in another file")
+			}
+			v.SetUtilized()
+		}
+		cfg.colDecrypt = decrypt
+	}
+}
+
+// WithKeyRetriever sets a key retriever callback. It's also possible to set explicit footer or column keys.
+func WithKeyRetriever(retriever DecryptionKeyRetriever) FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		if retriever != nil {
+			cfg.retriever = retriever
+		}
+	}
+}
+
+// DisableFooterSignatureVerification skips integrity verification of plaintext footers.
+//
+// If not called, the integrity of plaintext footers will be checked at runtime, and verification
+// will panic if the footer signing key is not available
+// or if the footer content and signature don't match.
+func DisableFooterSignatureVerification() FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		cfg.checkFooterIntegrity = false
+	}
+}
+
+// WithPlaintextAllowed allows reading plaintext files.
+//
+// By default, reading plaintext (unencrypted) files is not allowed when using
+// a decryptor, in order to detect files that were left unencrypted by mistake.
+// However, the default behavior can be overridden by using this method.
+func WithPlaintextAllowed() FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		cfg.plaintextAllowed = true
+	}
+}
+
+// WithDecryptAadPrefix explicitly supplies the file aad prefix.
+//
+// This is required when a prefix was used for file encryption but not stored in the file.
+func WithDecryptAadPrefix(prefix string) FileDecryptionOption {
+	return func(cfg *fileDecryptConfig) {
+		if prefix != "" {
+			cfg.aadPrefix = prefix
+		}
+	}
+}
+
+// Algorithm describes how something was encrypted, representing the EncryptionAlgorithm object from the
+// parquet.thrift file.
+type Algorithm struct {
+	Algo Cipher
+	Aad  struct {
+		AadPrefix       []byte
+		AadFileUnique   []byte
+		SupplyAadPrefix bool
+	}
+}
+
+// ToThrift returns an instance to be used for serializing when writing a file.
+func (e Algorithm) ToThrift() *format.EncryptionAlgorithm {
+	if e.Algo == AesGcm {
+		return &format.EncryptionAlgorithm{
+			AES_GCM_V1: &format.AesGcmV1{
+				AadPrefix:       e.Aad.AadPrefix,
+				AadFileUnique:   e.Aad.AadFileUnique,
+				SupplyAadPrefix: &e.Aad.SupplyAadPrefix,
+			},
+		}
+	}
+	return &format.EncryptionAlgorithm{
+		AES_GCM_CTR_V1: &format.AesGcmCtrV1{
+			AadPrefix:       e.Aad.AadPrefix,
+			AadFileUnique:   e.Aad.AadFileUnique,
+			SupplyAadPrefix: &e.Aad.SupplyAadPrefix,
+		},
+	}
+}
+
+// AlgorithmFromThrift converts the thrift object to the Algorithm struct for easier usage.
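Taken together, the decryption options above compose like this. A sketch with a hard-coded 16-byte demo key and a hypothetical `secret_col` column path; real keys should come from key management, and as the comments above note, a properties instance may be used for reading one file only:

```go
package main

import "github.com/apache/arrow/go/v12/parquet"

func main() {
	footerKey := "0123456789abcdef" // demo only; 16, 24, or 32 bytes

	// Per-column key for a column not encrypted with the footer key.
	colProps := parquet.ColumnPathToDecryptionPropsMap{
		"secret_col": parquet.NewColumnDecryptionProperties("secret_col",
			parquet.WithDecryptKey(footerKey)),
	}

	decProps := parquet.NewFileDecryptionProperties(
		parquet.WithFooterKey(footerKey),
		parquet.WithColumnKeys(colProps),
		parquet.WithPlaintextAllowed(), // also tolerate unencrypted files
	)
	_ = decProps // would be handed to the reader's properties
}
```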
+func AlgorithmFromThrift(enc *format.EncryptionAlgorithm) (ret Algorithm) {
+	if enc.IsSetAES_GCM_V1() {
+		ret.Algo = AesGcm
+		ret.Aad.AadFileUnique = enc.AES_GCM_V1.AadFileUnique
+		ret.Aad.AadPrefix = enc.AES_GCM_V1.AadPrefix
+		ret.Aad.SupplyAadPrefix = *enc.AES_GCM_V1.SupplyAadPrefix
+		return
+	}
+	ret.Algo = AesCtr
+	ret.Aad.AadFileUnique = enc.AES_GCM_CTR_V1.AadFileUnique
+	ret.Aad.AadPrefix = enc.AES_GCM_CTR_V1.AadPrefix
+	ret.Aad.SupplyAadPrefix = *enc.AES_GCM_CTR_V1.SupplyAadPrefix
+	return
+}
+
+// FileEncryptionProperties describe how to encrypt a parquet file when writing data.
+type FileEncryptionProperties struct {
+	alg                  Algorithm
+	footerKey            string
+	footerKeyMetadata    string
+	encryptedFooter      bool
+	fileAad              string
+	utilized             bool
+	storeAadPrefixInFile bool
+	aadPrefix            string
+	encryptedCols        ColumnPathToEncryptionPropsMap
+}
+
+// EncryptedFooter returns if the footer for this file should be encrypted or left in plaintext.
+func (fe *FileEncryptionProperties) EncryptedFooter() bool { return fe.encryptedFooter }
+
+// Algorithm returns the description of how we will perform the encryption, the algorithm, prefixes, and so on.
+func (fe *FileEncryptionProperties) Algorithm() Algorithm { return fe.alg }
+
+// FooterKey returns the actual key used to encrypt the footer if it is encrypted, or to encrypt any columns which
+// will be encrypted with it rather than their own keys.
+func (fe *FileEncryptionProperties) FooterKey() string { return fe.footerKey }
+
+// FooterKeyMetadata is used for retrieving a key from the key retriever in order to set the footer key
+func (fe *FileEncryptionProperties) FooterKeyMetadata() string { return fe.footerKeyMetadata }
+
+// FileAad returns the aad identification to be used at the file level which gets concatenated with the row and column
+// information for encrypting data.
+func (fe *FileEncryptionProperties) FileAad() string { return fe.fileAad }
+
+// IsUtilized returns whether or not this instance has been used to encrypt a file
+func (fe *FileEncryptionProperties) IsUtilized() bool { return fe.utilized }
+
+// SetUtilized is called after writing a file. A FileEncryptionProperties object can be used for writing one file only;
+// the encryption keys will be wiped out upon completion of writing the file.
+func (fe *FileEncryptionProperties) SetUtilized() { fe.utilized = true }
+
+// EncryptedColumns returns the mapping of column paths to column encryption properties
+func (fe *FileEncryptionProperties) EncryptedColumns() ColumnPathToEncryptionPropsMap {
+	return fe.encryptedCols
+}
+
+// ColumnEncryptionProperties returns the properties for encrypting a given column.
+//
+// This may be nil for columns that aren't encrypted or may be default properties.
+func (fe *FileEncryptionProperties) ColumnEncryptionProperties(path string) *ColumnEncryptionProperties {
+	if len(fe.encryptedCols) == 0 {
+		return NewColumnEncryptionProperties(path)
+	}
+	if c, ok := fe.encryptedCols[path]; ok {
+		return c
+	}
+	return nil
+}
+
+// Clone allows returning an identical property setup for another file with the option to update the aadPrefix,
+// (if given the empty string, the current aad prefix will be used) since a single instance can only be used
+// to encrypt one file before wiping out the keys.
+func (fe *FileEncryptionProperties) Clone(newAadPrefix string) *FileEncryptionProperties {
+	footerKeyCopy := fe.footerKey
+	encryptedColsCopy := make(ColumnPathToEncryptionPropsMap)
+	for k, v := range fe.encryptedCols {
+		encryptedColsCopy[k] = v.Clone()
+	}
+	if newAadPrefix == "" {
+		newAadPrefix = fe.aadPrefix
+	}
+
+	opts := []EncryptOption{
+		WithAlg(fe.alg.Algo), WithFooterKeyMetadata(fe.footerKeyMetadata),
+		WithAadPrefix(newAadPrefix), WithEncryptedColumns(encryptedColsCopy),
+	}
+	if !fe.encryptedFooter {
+		opts = append(opts, WithPlaintextFooter())
+	}
+	if !fe.storeAadPrefixInFile {
+		opts = append(opts, DisableAadPrefixStorage())
+	}
+	return NewFileEncryptionProperties(footerKeyCopy, opts...)
+}
+
+// WipeOutEncryptionKeys clears all of the encryption keys for this and the columns
+func (fe *FileEncryptionProperties) WipeOutEncryptionKeys() {
+	fe.footerKey = ""
+	for _, elem := range fe.encryptedCols {
+		elem.WipeOutEncryptionKey()
+	}
+}
+
+type configEncrypt struct {
+	cipher               Cipher
+	encryptFooter        bool
+	keyMetadata          string
+	aadprefix            string
+	storeAadPrefixInFile bool
+	encryptedCols        ColumnPathToEncryptionPropsMap
+}
+
+// EncryptOption is used for specifying values when building FileEncryptionProperties
+type EncryptOption func(*configEncrypt)
+
+// WithPlaintextFooter sets the writer to write the footer in plain text, otherwise the footer will be encrypted
+// too (which is the default behavior).
+func WithPlaintextFooter() EncryptOption {
+	return func(cfg *configEncrypt) {
+		cfg.encryptFooter = false
+	}
+}
+
+// WithAlg sets the encryption algorithm to utilize. (default is AesGcm)
+func WithAlg(cipher Cipher) EncryptOption {
+	return func(cfg *configEncrypt) {
+		cfg.cipher = cipher
+	}
+}
+
+// WithFooterKeyID sets a key retrieval metadata to use (converted from a string); this must be a UTF8 string.
+//
+// Use either WithFooterKeyID or WithFooterKeyMetadata, not both.
+func WithFooterKeyID(key string) EncryptOption {
+	if !utf8.ValidString(key) {
+		panic("parquet: footer key id should be UTF8 encoded")
+	}
+	return WithFooterKeyMetadata(key)
+}
+
+// WithFooterKeyMetadata sets a key retrieval metadata to use for getting the key.
+//
+// Use either WithFooterKeyID or WithFooterKeyMetadata, not both.
+func WithFooterKeyMetadata(keyMeta string) EncryptOption {
+	return func(cfg *configEncrypt) {
+		if keyMeta != "" {
+			cfg.keyMetadata = keyMeta
+		}
+	}
+}
+
+// WithAadPrefix sets the AAD prefix to use for encryption and by default will store it in the file
+func WithAadPrefix(aadPrefix string) EncryptOption {
+	return func(cfg *configEncrypt) {
+		if aadPrefix != "" {
+			cfg.aadprefix = aadPrefix
+			cfg.storeAadPrefixInFile = true
+		}
+	}
+}
+
+// DisableAadPrefixStorage will set the properties to not store the AadPrefix in the file. If this isn't called
+// and the AadPrefix is set, then it will be stored. This needs to be in the options *after* WithAadPrefix to have an effect.
+func DisableAadPrefixStorage() EncryptOption {
+	return func(cfg *configEncrypt) {
+		cfg.storeAadPrefixInFile = false
+	}
+}
+
+// WithEncryptedColumns sets the map of columns and their properties (keys etc.). If not called, then all columns will
+// be encrypted with the footer key. If called, then columns not in the map will be left unencrypted.
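The write-side options collected above (together with the `NewFileEncryptionProperties` constructor that follows) compose the same way. A sketch with a hypothetical `ssn` column and hypothetical key ids; once an explicit column map is supplied, unlisted columns stay plaintext:

```go
package main

import "github.com/apache/arrow/go/v12/parquet"

func main() {
	footerKey := "0123456789abcdef" // demo only; 16, 24, or 32 bytes

	// Encrypt one column with its own key and key-retrieval id.
	cols := parquet.ColumnPathToEncryptionPropsMap{
		"ssn": parquet.NewColumnEncryptionProperties("ssn",
			parquet.WithKey("abcdef0123456789"),
			parquet.WithKeyID("ssn-key-1")),
	}

	encProps := parquet.NewFileEncryptionProperties(footerKey,
		parquet.WithFooterKeyID("footer-key-1"),
		parquet.WithEncryptedColumns(cols),
		parquet.WithPlaintextFooter(), // footer stays readable but signed
	)
	_ = encProps // would be handed to the writer's properties
}
```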
+func WithEncryptedColumns(encrypted ColumnPathToEncryptionPropsMap) EncryptOption { + none := func(*configEncrypt) {} + if len(encrypted) == 0 { + return none + } + return func(cfg *configEncrypt) { + if len(cfg.encryptedCols) != 0 { + panic("column properties already set") + } + for _, v := range encrypted { + if v.IsUtilized() { + panic("column properties utilized in another file") + } + v.SetUtilized() + } + cfg.encryptedCols = encrypted + } +} + +// NewFileEncryptionProperties returns a new File Encryption description object using the options provided. +func NewFileEncryptionProperties(footerKey string, opts ...EncryptOption) *FileEncryptionProperties { + var cfg configEncrypt + cfg.cipher = DefaultEncryptionAlgorithm + cfg.encryptFooter = DefaultEncryptedFooter + for _, o := range opts { + o(&cfg) + } + + props := &FileEncryptionProperties{ + footerKey: footerKey, + footerKeyMetadata: cfg.keyMetadata, + encryptedFooter: cfg.encryptFooter, + aadPrefix: cfg.aadprefix, + storeAadPrefixInFile: cfg.storeAadPrefixInFile, + encryptedCols: cfg.encryptedCols, + utilized: false, + } + + aadFileUnique := [AadFileUniqueLength]uint8{} + _, err := rand.Read(aadFileUnique[:]) + if err != nil { + panic(err) + } + + supplyAadPrefix := false + if props.aadPrefix == "" { + props.fileAad = string(aadFileUnique[:]) + } else { + props.fileAad = props.aadPrefix + string(aadFileUnique[:]) + if !props.storeAadPrefixInFile { + supplyAadPrefix = true + } + } + props.alg.Algo = cfg.cipher + props.alg.Aad.AadFileUnique = aadFileUnique[:] + props.alg.Aad.SupplyAadPrefix = supplyAadPrefix + if cfg.aadprefix != "" && cfg.storeAadPrefixInFile { + props.alg.Aad.AadPrefix = []byte(props.aadPrefix) + } + return props +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go new file mode 100644 index 00000000..52b9a233 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !assert + +package debug + +// Assert will panic with msg if cond is false. +// +// msg must be a string, func() string or fmt.Stringer. +func Assert(cond bool, msg interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go new file mode 100644 index 00000000..188e6831 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build assert + +package debug + +// Assert will panic with msg if cond is false. +// +// msg should be a string or fmt.Stringer +func Assert(cond bool, msg interface{}) { + if !cond { + panic(msg) + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go new file mode 100644 index 00000000..61684d62 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package debug provides APIs for conditional runtime assertions and debug logging. +// +// Using Assert +// +// To enable runtime assertions, build with the assert tag. When the assert tag is omitted, +// the code for the assertion will be omitted from the binary. +package debug diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go new file mode 100644 index 00000000..23dcccd8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
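The `assert_on.go`/`assert_off.go` pair above (and the `log_on.go`/`log_off.go` pair below) is the standard Go build-tag toggle: two files declare the same symbol under mutually exclusive tags, so the no-op variant compiles away entirely in ordinary builds. A generic sketch of the pattern with a hypothetical `check` package, using the modern `//go:build` form alongside the legacy `// +build` lines seen in these files:

```go
// check_on.go -- compiled only with `go build -tags assert`.

//go:build assert
// +build assert

package check

// Assert panics with msg when cond is false.
func Assert(cond bool, msg interface{}) {
	if !cond {
		panic(msg)
	}
}
```

The mirror file would carry `//go:build !assert` and an empty `Assert` body, so call sites cost nothing unless the tag is supplied.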
+ +// +build !debug + +package debug + +// use build tags in order to control the existence of this log function vs it getting +// optimized away as a noop without the debug build tag. + +func Log(interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go new file mode 100644 index 00000000..8d610609 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build debug + +package debug + +import ( + "log" + "os" +) + +var ( + debug = log.New(os.Stderr, "[D] ", log.LstdFlags) +) + +func Log(msg interface{}) { + debug.Println(msg) +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go new file mode 100644 index 00000000..1de0c8de --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. + +package parquet + +var GoUnusedProtection__ int; + diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go new file mode 100644 index 00000000..d4a63b22 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go @@ -0,0 +1,23 @@ +// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. + +package parquet + +import ( + "bytes" + "context" + "fmt" + "time" + thrift "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + + +func init() { +} + diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go new file mode 100644 index 00000000..d4508f8e --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go @@ -0,0 +1,10967 @@ +// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. + +package parquet + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + thrift "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +//Types supported by Parquet. 
These types are intended to be used in combination +//with the encodings to control the on disk storage format. +//For example INT16 is not included as a type since a good encoding of INT32 +//would handle this. +type Type int64 +const ( + Type_BOOLEAN Type = 0 + Type_INT32 Type = 1 + Type_INT64 Type = 2 + Type_INT96 Type = 3 + Type_FLOAT Type = 4 + Type_DOUBLE Type = 5 + Type_BYTE_ARRAY Type = 6 + Type_FIXED_LEN_BYTE_ARRAY Type = 7 +) + +func (p Type) String() string { + switch p { + case Type_BOOLEAN: return "BOOLEAN" + case Type_INT32: return "INT32" + case Type_INT64: return "INT64" + case Type_INT96: return "INT96" + case Type_FLOAT: return "FLOAT" + case Type_DOUBLE: return "DOUBLE" + case Type_BYTE_ARRAY: return "BYTE_ARRAY" + case Type_FIXED_LEN_BYTE_ARRAY: return "FIXED_LEN_BYTE_ARRAY" + } + return "" +} + +func TypeFromString(s string) (Type, error) { + switch s { + case "BOOLEAN": return Type_BOOLEAN, nil + case "INT32": return Type_INT32, nil + case "INT64": return Type_INT64, nil + case "INT96": return Type_INT96, nil + case "FLOAT": return Type_FLOAT, nil + case "DOUBLE": return Type_DOUBLE, nil + case "BYTE_ARRAY": return Type_BYTE_ARRAY, nil + case "FIXED_LEN_BYTE_ARRAY": return Type_FIXED_LEN_BYTE_ARRAY, nil + } + return Type(0), fmt.Errorf("not a valid Type string") +} + + +func TypePtr(v Type) *Type { return &v } + +func (p Type) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *Type) UnmarshalText(text []byte) error { +q, err := TypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *Type) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = Type(v) +return nil +} + +func (p * Type) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +//Common types used by frameworks(e.g. hive, pig) using parquet. This helps map +//between types in those frameworks to the base types in parquet. This is only +//metadata and not needed to read or write the data. 
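Every enum generated in this file ships the same helper set: `String`, `<Name>FromString`, `MarshalText`/`UnmarshalText`, and `database/sql` `Scan`/`Value`. A round-trip sketch using `Type` above; note the `internal/gen-go` path means this package is importable only from inside the arrow module, so the import here is purely illustrative:

```go
package main

import (
	"fmt"

	parquet "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet"
)

func main() {
	t, err := parquet.TypeFromString("BYTE_ARRAY")
	if err != nil {
		panic(err)
	}

	// MarshalText/UnmarshalText make the enum usable with encoding/json.
	text, _ := t.MarshalText()
	var back parquet.Type
	if err := back.UnmarshalText(text); err != nil {
		panic(err)
	}
	fmt.Println(back == parquet.Type_BYTE_ARRAY, back.String()) // true BYTE_ARRAY
}
```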
+type ConvertedType int64 +const ( + ConvertedType_UTF8 ConvertedType = 0 + ConvertedType_MAP ConvertedType = 1 + ConvertedType_MAP_KEY_VALUE ConvertedType = 2 + ConvertedType_LIST ConvertedType = 3 + ConvertedType_ENUM ConvertedType = 4 + ConvertedType_DECIMAL ConvertedType = 5 + ConvertedType_DATE ConvertedType = 6 + ConvertedType_TIME_MILLIS ConvertedType = 7 + ConvertedType_TIME_MICROS ConvertedType = 8 + ConvertedType_TIMESTAMP_MILLIS ConvertedType = 9 + ConvertedType_TIMESTAMP_MICROS ConvertedType = 10 + ConvertedType_UINT_8 ConvertedType = 11 + ConvertedType_UINT_16 ConvertedType = 12 + ConvertedType_UINT_32 ConvertedType = 13 + ConvertedType_UINT_64 ConvertedType = 14 + ConvertedType_INT_8 ConvertedType = 15 + ConvertedType_INT_16 ConvertedType = 16 + ConvertedType_INT_32 ConvertedType = 17 + ConvertedType_INT_64 ConvertedType = 18 + ConvertedType_JSON ConvertedType = 19 + ConvertedType_BSON ConvertedType = 20 + ConvertedType_INTERVAL ConvertedType = 21 +) + +func (p ConvertedType) String() string { + switch p { + case ConvertedType_UTF8: return "UTF8" + case ConvertedType_MAP: return "MAP" + case ConvertedType_MAP_KEY_VALUE: return "MAP_KEY_VALUE" + case ConvertedType_LIST: return "LIST" + case ConvertedType_ENUM: return "ENUM" + case ConvertedType_DECIMAL: return "DECIMAL" + case ConvertedType_DATE: return "DATE" + case ConvertedType_TIME_MILLIS: return "TIME_MILLIS" + case ConvertedType_TIME_MICROS: return "TIME_MICROS" + case ConvertedType_TIMESTAMP_MILLIS: return "TIMESTAMP_MILLIS" + case ConvertedType_TIMESTAMP_MICROS: return "TIMESTAMP_MICROS" + case ConvertedType_UINT_8: return "UINT_8" + case ConvertedType_UINT_16: return "UINT_16" + case ConvertedType_UINT_32: return "UINT_32" + case ConvertedType_UINT_64: return "UINT_64" + case ConvertedType_INT_8: return "INT_8" + case ConvertedType_INT_16: return "INT_16" + case ConvertedType_INT_32: return "INT_32" + case ConvertedType_INT_64: return "INT_64" + case ConvertedType_JSON: return "JSON" + case ConvertedType_BSON: return "BSON" + case ConvertedType_INTERVAL: return "INTERVAL" + } + return "" +} + +func ConvertedTypeFromString(s string) (ConvertedType, error) { + switch s { + case "UTF8": return ConvertedType_UTF8, nil + case "MAP": return ConvertedType_MAP, nil + case "MAP_KEY_VALUE": return ConvertedType_MAP_KEY_VALUE, nil + case "LIST": return ConvertedType_LIST, nil + case "ENUM": return ConvertedType_ENUM, nil + case "DECIMAL": return ConvertedType_DECIMAL, nil + case "DATE": return ConvertedType_DATE, nil + case "TIME_MILLIS": return ConvertedType_TIME_MILLIS, nil + case "TIME_MICROS": return ConvertedType_TIME_MICROS, nil + case "TIMESTAMP_MILLIS": return ConvertedType_TIMESTAMP_MILLIS, nil + case "TIMESTAMP_MICROS": return ConvertedType_TIMESTAMP_MICROS, nil + case "UINT_8": return ConvertedType_UINT_8, nil + case "UINT_16": return ConvertedType_UINT_16, nil + case "UINT_32": return ConvertedType_UINT_32, nil + case "UINT_64": return ConvertedType_UINT_64, nil + case "INT_8": return ConvertedType_INT_8, nil + case "INT_16": return ConvertedType_INT_16, nil + case "INT_32": return ConvertedType_INT_32, nil + case "INT_64": return ConvertedType_INT_64, nil + case "JSON": return ConvertedType_JSON, nil + case "BSON": return ConvertedType_BSON, nil + case "INTERVAL": return ConvertedType_INTERVAL, nil + } + return ConvertedType(0), fmt.Errorf("not a valid ConvertedType string") +} + + +func ConvertedTypePtr(v ConvertedType) *ConvertedType { return &v } + +func (p ConvertedType) MarshalText() ([]byte, error) { +return 
[]byte(p.String()), nil +} + +func (p *ConvertedType) UnmarshalText(text []byte) error { +q, err := ConvertedTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *ConvertedType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = ConvertedType(v) +return nil +} + +func (p * ConvertedType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +//Representation of Schemas +type FieldRepetitionType int64 +const ( + FieldRepetitionType_REQUIRED FieldRepetitionType = 0 + FieldRepetitionType_OPTIONAL FieldRepetitionType = 1 + FieldRepetitionType_REPEATED FieldRepetitionType = 2 +) + +func (p FieldRepetitionType) String() string { + switch p { + case FieldRepetitionType_REQUIRED: return "REQUIRED" + case FieldRepetitionType_OPTIONAL: return "OPTIONAL" + case FieldRepetitionType_REPEATED: return "REPEATED" + } + return "" +} + +func FieldRepetitionTypeFromString(s string) (FieldRepetitionType, error) { + switch s { + case "REQUIRED": return FieldRepetitionType_REQUIRED, nil + case "OPTIONAL": return FieldRepetitionType_OPTIONAL, nil + case "REPEATED": return FieldRepetitionType_REPEATED, nil + } + return FieldRepetitionType(0), fmt.Errorf("not a valid FieldRepetitionType string") +} + + +func FieldRepetitionTypePtr(v FieldRepetitionType) *FieldRepetitionType { return &v } + +func (p FieldRepetitionType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *FieldRepetitionType) UnmarshalText(text []byte) error { +q, err := FieldRepetitionTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *FieldRepetitionType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = FieldRepetitionType(v) +return nil +} + +func (p * FieldRepetitionType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +//Encodings supported by Parquet. Not all encodings are valid for all types. These +//enums are also used to specify the encoding of definition and repetition levels. +//See the accompanying doc for the details of the more complicated encodings. 
+type Encoding int64 +const ( + Encoding_PLAIN Encoding = 0 + Encoding_PLAIN_DICTIONARY Encoding = 2 + Encoding_RLE Encoding = 3 + Encoding_BIT_PACKED Encoding = 4 + Encoding_DELTA_BINARY_PACKED Encoding = 5 + Encoding_DELTA_LENGTH_BYTE_ARRAY Encoding = 6 + Encoding_DELTA_BYTE_ARRAY Encoding = 7 + Encoding_RLE_DICTIONARY Encoding = 8 + Encoding_BYTE_STREAM_SPLIT Encoding = 9 +) + +func (p Encoding) String() string { + switch p { + case Encoding_PLAIN: return "PLAIN" + case Encoding_PLAIN_DICTIONARY: return "PLAIN_DICTIONARY" + case Encoding_RLE: return "RLE" + case Encoding_BIT_PACKED: return "BIT_PACKED" + case Encoding_DELTA_BINARY_PACKED: return "DELTA_BINARY_PACKED" + case Encoding_DELTA_LENGTH_BYTE_ARRAY: return "DELTA_LENGTH_BYTE_ARRAY" + case Encoding_DELTA_BYTE_ARRAY: return "DELTA_BYTE_ARRAY" + case Encoding_RLE_DICTIONARY: return "RLE_DICTIONARY" + case Encoding_BYTE_STREAM_SPLIT: return "BYTE_STREAM_SPLIT" + } + return "" +} + +func EncodingFromString(s string) (Encoding, error) { + switch s { + case "PLAIN": return Encoding_PLAIN, nil + case "PLAIN_DICTIONARY": return Encoding_PLAIN_DICTIONARY, nil + case "RLE": return Encoding_RLE, nil + case "BIT_PACKED": return Encoding_BIT_PACKED, nil + case "DELTA_BINARY_PACKED": return Encoding_DELTA_BINARY_PACKED, nil + case "DELTA_LENGTH_BYTE_ARRAY": return Encoding_DELTA_LENGTH_BYTE_ARRAY, nil + case "DELTA_BYTE_ARRAY": return Encoding_DELTA_BYTE_ARRAY, nil + case "RLE_DICTIONARY": return Encoding_RLE_DICTIONARY, nil + case "BYTE_STREAM_SPLIT": return Encoding_BYTE_STREAM_SPLIT, nil + } + return Encoding(0), fmt.Errorf("not a valid Encoding string") +} + + +func EncodingPtr(v Encoding) *Encoding { return &v } + +func (p Encoding) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *Encoding) UnmarshalText(text []byte) error { +q, err := EncodingFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *Encoding) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = Encoding(v) +return nil +} + +func (p * Encoding) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +//Supported compression algorithms. +// +//Codecs added in format version X.Y can be read by readers based on X.Y and later. +//Codec support may vary between readers based on the format version and +//libraries available at runtime. +// +//See Compression.md for a detailed specification of these algorithms. 
+type CompressionCodec int64 +const ( + CompressionCodec_UNCOMPRESSED CompressionCodec = 0 + CompressionCodec_SNAPPY CompressionCodec = 1 + CompressionCodec_GZIP CompressionCodec = 2 + CompressionCodec_LZO CompressionCodec = 3 + CompressionCodec_BROTLI CompressionCodec = 4 + CompressionCodec_LZ4 CompressionCodec = 5 + CompressionCodec_ZSTD CompressionCodec = 6 + CompressionCodec_LZ4_RAW CompressionCodec = 7 +) + +func (p CompressionCodec) String() string { + switch p { + case CompressionCodec_UNCOMPRESSED: return "UNCOMPRESSED" + case CompressionCodec_SNAPPY: return "SNAPPY" + case CompressionCodec_GZIP: return "GZIP" + case CompressionCodec_LZO: return "LZO" + case CompressionCodec_BROTLI: return "BROTLI" + case CompressionCodec_LZ4: return "LZ4" + case CompressionCodec_ZSTD: return "ZSTD" + case CompressionCodec_LZ4_RAW: return "LZ4_RAW" + } + return "" +} + +func CompressionCodecFromString(s string) (CompressionCodec, error) { + switch s { + case "UNCOMPRESSED": return CompressionCodec_UNCOMPRESSED, nil + case "SNAPPY": return CompressionCodec_SNAPPY, nil + case "GZIP": return CompressionCodec_GZIP, nil + case "LZO": return CompressionCodec_LZO, nil + case "BROTLI": return CompressionCodec_BROTLI, nil + case "LZ4": return CompressionCodec_LZ4, nil + case "ZSTD": return CompressionCodec_ZSTD, nil + case "LZ4_RAW": return CompressionCodec_LZ4_RAW, nil + } + return CompressionCodec(0), fmt.Errorf("not a valid CompressionCodec string") +} + + +func CompressionCodecPtr(v CompressionCodec) *CompressionCodec { return &v } + +func (p CompressionCodec) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *CompressionCodec) UnmarshalText(text []byte) error { +q, err := CompressionCodecFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *CompressionCodec) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = CompressionCodec(v) +return nil +} + +func (p * CompressionCodec) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type PageType int64 +const ( + PageType_DATA_PAGE PageType = 0 + PageType_INDEX_PAGE PageType = 1 + PageType_DICTIONARY_PAGE PageType = 2 + PageType_DATA_PAGE_V2 PageType = 3 +) + +func (p PageType) String() string { + switch p { + case PageType_DATA_PAGE: return "DATA_PAGE" + case PageType_INDEX_PAGE: return "INDEX_PAGE" + case PageType_DICTIONARY_PAGE: return "DICTIONARY_PAGE" + case PageType_DATA_PAGE_V2: return "DATA_PAGE_V2" + } + return "" +} + +func PageTypeFromString(s string) (PageType, error) { + switch s { + case "DATA_PAGE": return PageType_DATA_PAGE, nil + case "INDEX_PAGE": return PageType_INDEX_PAGE, nil + case "DICTIONARY_PAGE": return PageType_DICTIONARY_PAGE, nil + case "DATA_PAGE_V2": return PageType_DATA_PAGE_V2, nil + } + return PageType(0), fmt.Errorf("not a valid PageType string") +} + + +func PageTypePtr(v PageType) *PageType { return &v } + +func (p PageType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *PageType) UnmarshalText(text []byte) error { +q, err := PageTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *PageType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = PageType(v) +return nil +} + +func (p * PageType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +//Enum to 
annotate whether lists of min/max elements inside ColumnIndex +//are ordered and if so, in which direction. +type BoundaryOrder int64 +const ( + BoundaryOrder_UNORDERED BoundaryOrder = 0 + BoundaryOrder_ASCENDING BoundaryOrder = 1 + BoundaryOrder_DESCENDING BoundaryOrder = 2 +) + +func (p BoundaryOrder) String() string { + switch p { + case BoundaryOrder_UNORDERED: return "UNORDERED" + case BoundaryOrder_ASCENDING: return "ASCENDING" + case BoundaryOrder_DESCENDING: return "DESCENDING" + } + return "" +} + +func BoundaryOrderFromString(s string) (BoundaryOrder, error) { + switch s { + case "UNORDERED": return BoundaryOrder_UNORDERED, nil + case "ASCENDING": return BoundaryOrder_ASCENDING, nil + case "DESCENDING": return BoundaryOrder_DESCENDING, nil + } + return BoundaryOrder(0), fmt.Errorf("not a valid BoundaryOrder string") +} + + +func BoundaryOrderPtr(v BoundaryOrder) *BoundaryOrder { return &v } + +func (p BoundaryOrder) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *BoundaryOrder) UnmarshalText(text []byte) error { +q, err := BoundaryOrderFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *BoundaryOrder) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = BoundaryOrder(v) +return nil +} + +func (p * BoundaryOrder) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +// Statistics per row group and per page +// All fields are optional. +// +// Attributes: +// - Max: DEPRECATED: min and max value of the column. Use min_value and max_value. +// +// Values are encoded using PLAIN encoding, except that variable-length byte +// arrays do not include a length prefix. +// +// These fields encode min and max values determined by signed comparison +// only. New files should use the correct order for a column's logical type +// and store the values in the min_value and max_value fields. +// +// To support older readers, these may be set when the column order is +// signed. +// - Min +// - NullCount: count of null value in the column +// - DistinctCount: count of distinct values occurring +// - MaxValue: Min and max values for the column, determined by its ColumnOrder. +// +// Values are encoded using PLAIN encoding, except that variable-length byte +// arrays do not include a length prefix. 
+type Statistics struct {
+  Max           []byte `thrift:"max,1" db:"max" json:"max,omitempty"`
+  Min           []byte `thrift:"min,2" db:"min" json:"min,omitempty"`
+  NullCount     *int64 `thrift:"null_count,3" db:"null_count" json:"null_count,omitempty"`
+  DistinctCount *int64 `thrift:"distinct_count,4" db:"distinct_count" json:"distinct_count,omitempty"`
+  MaxValue      []byte `thrift:"max_value,5" db:"max_value" json:"max_value,omitempty"`
+  MinValue      []byte `thrift:"min_value,6" db:"min_value" json:"min_value,omitempty"`
+}
+
+func NewStatistics() *Statistics {
+  return &Statistics{}
+}
+
+// Each optional field gets a generated default, a GetX accessor that falls
+// back to that default, and an IsSetX predicate; NullCount is representative:
+
+var Statistics_NullCount_DEFAULT int64
+
+func (p *Statistics) GetNullCount() int64 {
+  if !p.IsSetNullCount() {
+    return Statistics_NullCount_DEFAULT
+  }
+  return *p.NullCount
+}
+
+func (p *Statistics) IsSetNullCount() bool {
+  return p.NullCount != nil
+}
+
+// [generated Get/IsSet pairs for Max, Min, DistinctCount, MaxValue and
+// MinValue elided; same pattern]
+
+// Read dispatches on thrift field ids 1-6 (BINARY for the byte-slice fields,
+// I64 for the counters), skipping unknown or mistyped fields; Write emits
+// only the fields whose IsSetX predicate is true. Both wrap failures with
+// thrift.PrependError. [generated Read/ReadField1..6/Write/writeField1..6
+// bodies elided; standard pattern]
+
+func (p *Statistics) Equals(other *Statistics) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if bytes.Compare(p.Max, other.Max) != 0 {
+    return false
+  }
+  if bytes.Compare(p.Min, other.Min) != 0 {
+    return false
+  }
+  if p.NullCount != other.NullCount {
+    if p.NullCount == nil || other.NullCount == nil {
+      return false
+    }
+    if *p.NullCount != *other.NullCount {
+      return false
+    }
+  }
+  if p.DistinctCount != other.DistinctCount {
+    if p.DistinctCount == nil || other.DistinctCount == nil {
+      return false
+    }
+    if *p.DistinctCount != *other.DistinctCount {
+      return false
+    }
+  }
+  if bytes.Compare(p.MaxValue, other.MaxValue) != 0 {
+    return false
+  }
+  if bytes.Compare(p.MinValue, other.MinValue) != 0 {
+    return false
+  }
+  return true
+}
+
+func (p *Statistics) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Statistics(%+v)", *p)
+}
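Per the field comments above, min/max are deprecated in favor of the ColumnOrder-aware min_value/max_value. A small reader-side sketch of that preference, using only the generated accessors (the import path is hypothetical):

```go
package main

import (
	"fmt"

	format "example.com/parquet/format" // hypothetical import path
)

// pickMin prefers the ColumnOrder-aware min_value over the deprecated,
// signed-comparison-only min, as the field comments prescribe.
func pickMin(s *format.Statistics) ([]byte, bool) {
	if s.IsSetMinValue() {
		return s.GetMinValue(), true
	}
	if s.IsSetMin() { // legacy writers only populate min/max
		return s.GetMin(), true
	}
	return nil, false
}

func main() {
	s := format.NewStatistics()
	s.MinValue = []byte{0x01}
	if v, ok := pickMin(s); ok {
		fmt.Println(v) // [1]
	}
}
```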
+// Empty structs to use as logical type annotations
+type StringType struct{}
+
+func NewStringType() *StringType { return &StringType{} }
+
+type UUIDType struct{}
+
+func NewUUIDType() *UUIDType { return &UUIDType{} }
+
+type MapType struct{}
+
+func NewMapType() *MapType { return &MapType{} }
+
+type ListType struct{}
+
+func NewListType() *ListType { return &ListType{} }
+
+type EnumType struct{}
+
+func NewEnumType() *EnumType { return &EnumType{} }
+
+type DateType struct{}
+
+func NewDateType() *DateType { return &DateType{} }
+
+// Logical type to annotate a column that is always null.
+//
+// Sometimes when discovering the schema of existing data, values are always
+// null and the physical type can't be determined. This annotation signals
+// the case where the physical type was guessed from all null values.
+type NullType struct{}
+
+func NewNullType() *NullType { return &NullType{} }
+
+// [generated Read (skip every field), Write (emit no fields), Equals and
+// String methods for each of these empty annotation structs elided; they are
+// identical up to the type name]
+// Decimal logical type annotation
+//
+// To maintain forward-compatibility in v1, implementations using this logical
+// type must also set scale and precision on the annotated SchemaElement.
+//
+// Allowed for physical types: INT32, INT64, FIXED, and BINARY
+//
+// Attributes:
+//  - Scale
+//  - Precision
+type DecimalType struct {
+  Scale     int32 `thrift:"scale,1,required" db:"scale" json:"scale"`
+  Precision int32 `thrift:"precision,2,required" db:"precision" json:"precision"`
+}
+
+func NewDecimalType() *DecimalType {
+  return &DecimalType{}
+}
+
+func (p *DecimalType) GetScale() int32     { return p.Scale }
+func (p *DecimalType) GetPrecision() int32 { return p.Precision }
+
+// Both fields are required: Read tracks issetScale/issetPrecision while
+// decoding and returns a thrift.INVALID_DATA protocol exception if either is
+// missing. [generated Read/Write/Equals/String bodies elided; standard
+// pattern]
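As the comment above states, a DECIMAL annotation fixes a scale and precision for a physical value that is stored unscaled. A hedged sketch of interpreting an INT32-backed decimal (the import path is hypothetical, and real readers would keep the unscaled integer rather than converting to float):

```go
package main

import (
	"fmt"
	"math"

	format "example.com/parquet/format" // hypothetical import path
)

func main() {
	dec := format.NewDecimalType()
	dec.Scale = 2
	dec.Precision = 9

	// An INT32 physical value under this annotation is the unscaled integer,
	// so the logical number is raw / 10^scale.
	raw := int32(123456)
	logical := float64(raw) / math.Pow10(int(dec.GetScale()))
	fmt.Printf("%v -> %.2f\n", dec, logical) // DecimalType({Scale:2 Precision:9}) -> 1234.56
}
```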
+// Time units for logical types
+type MilliSeconds struct{}
+
+func NewMilliSeconds() *MilliSeconds { return &MilliSeconds{} }
+
+type MicroSeconds struct{}
+
+func NewMicroSeconds() *MicroSeconds { return &MicroSeconds{} }
+
+type NanoSeconds struct{}
+
+func NewNanoSeconds() *NanoSeconds { return &NanoSeconds{} }
+
+// [generated Read/Write/Equals/String methods for the three empty unit
+// structs elided; identical to the annotation structs above]
+
+// Attributes:
+//  - MILLIS
+//  - MICROS
+//  - NANOS
+type TimeUnit struct {
+  MILLIS *MilliSeconds `thrift:"MILLIS,1" db:"MILLIS" json:"MILLIS,omitempty"`
+  MICROS *MicroSeconds `thrift:"MICROS,2" db:"MICROS" json:"MICROS,omitempty"`
+  NANOS  *NanoSeconds  `thrift:"NANOS,3" db:"NANOS" json:"NANOS,omitempty"`
+}
+
+func NewTimeUnit() *TimeUnit {
+  return &TimeUnit{}
+}
+
+// TimeUnit is a thrift union. Each member has the generated GetX/IsSetX pair,
+// and CountSetFieldsTimeUnit reports how many members are set:
+
+func (p *TimeUnit) CountSetFieldsTimeUnit() int {
+  count := 0
+  if p.IsSetMILLIS() {
+    count++
+  }
+  if p.IsSetMICROS() {
+    count++
+  }
+  if p.IsSetNANOS() {
+    count++
+  }
+  return count
+}
+
+func (p *TimeUnit) IsSetMILLIS() bool { return p.MILLIS != nil }
+func (p *TimeUnit) IsSetMICROS() bool { return p.MICROS != nil }
+func (p *TimeUnit) IsSetNANOS() bool  { return p.NANOS != nil }
+
+// Read decodes whichever member arrives on the wire (field ids 1-3, all
+// thrift.STRUCT); Write enforces the union invariant before emitting the
+// single set member:
+
+func (p *TimeUnit) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if c := p.CountSetFieldsTimeUnit(); c != 1 {
+    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
+  }
+  if err := oprot.WriteStructBegin(ctx, "TimeUnit"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+  }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil {
+      return err
+    }
+    if err := p.writeField2(ctx, oprot); err != nil {
+      return err
+    }
+    if err := p.writeField3(ctx, oprot); err != nil {
+      return err
+    }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err)
+  }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err)
+  }
+  return nil
+}
+
+// [generated Read/ReadField1..3/writeField1..3/Equals/String bodies elided;
+// standard pattern]
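Because Write rejects any TimeUnit where the number of set members is not exactly one, the same invariant can be checked up front with the generated CountSetFieldsTimeUnit (import path hypothetical):

```go
package main

import (
	"fmt"
	"log"

	format "example.com/parquet/format" // hypothetical import path
)

func main() {
	u := format.NewTimeUnit()
	u.MICROS = format.NewMicroSeconds() // set exactly one union member

	// Mirror the check Write performs before serializing the union.
	if c := u.CountSetFieldsTimeUnit(); c != 1 {
		log.Fatalf("invalid TimeUnit: %d members set", c)
	}
	fmt.Println(u.IsSetMICROS()) // true
}
```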
+// Timestamp logical type annotation
+//
+// Allowed for physical types: INT64
+//
+// Attributes:
+//  - IsAdjustedToUTC
+//  - Unit
+type TimestampType struct {
+  IsAdjustedToUTC bool      `thrift:"isAdjustedToUTC,1,required" db:"isAdjustedToUTC" json:"isAdjustedToUTC"`
+  Unit            *TimeUnit `thrift:"unit,2,required" db:"unit" json:"unit"`
+}
+
+func NewTimestampType() *TimestampType {
+  return &TimestampType{}
+}
+
+func (p *TimestampType) GetIsAdjustedToUTC() bool { return p.IsAdjustedToUTC }
+
+var TimestampType_Unit_DEFAULT *TimeUnit
+
+func (p *TimestampType) GetUnit() *TimeUnit {
+  if !p.IsSetUnit() {
+    return TimestampType_Unit_DEFAULT
+  }
+  return p.Unit
+}
+
+func (p *TimestampType) IsSetUnit() bool { return p.Unit != nil }
+
+// Both fields are required: Read returns a thrift.INVALID_DATA protocol
+// exception if either is missing. [generated Read/Write/Equals/String bodies
+// elided; standard pattern]
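A TIMESTAMP annotation pairs the UTC-adjustment flag with one of the three TimeUnit members; both fields are required. A sketch of building the annotation and interpreting an INT64 column value under it (import path hypothetical):

```go
package main

import (
	"fmt"
	"time"

	format "example.com/parquet/format" // hypothetical import path
)

func main() {
	ts := format.NewTimestampType()
	ts.IsAdjustedToUTC = true
	ts.Unit = format.NewTimeUnit()
	ts.Unit.MICROS = format.NewMicroSeconds()

	// Under this annotation an INT64 column value counts microseconds
	// since the Unix epoch, normalized to UTC.
	var raw int64 = 1700000000000000
	fmt.Println(time.UnixMicro(raw).UTC()) // 2023-11-14 22:13:20 +0000 UTC
}
```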
+// Time logical type annotation
+//
+// Allowed for physical types: INT32 (millis), INT64 (micros, nanos)
+//
+// Attributes:
+//  - IsAdjustedToUTC
+//  - Unit
+type TimeType struct {
+  IsAdjustedToUTC bool      `thrift:"isAdjustedToUTC,1,required" db:"isAdjustedToUTC" json:"isAdjustedToUTC"`
+  Unit            *TimeUnit `thrift:"unit,2,required" db:"unit" json:"unit"`
+}
+
+func NewTimeType() *TimeType {
+  return &TimeType{}
+}
+
+// TimeType mirrors TimestampType: the same required-field validation in Read
+// and the same generated accessors, Write, Equals and String. [bodies elided]
+
+// Integer logical type annotation
+//
+// bitWidth must be 8, 16, 32, or 64.
+//
+// Allowed for physical types: INT32, INT64
+//
+// Attributes:
+//  - BitWidth
+//  - IsSigned
+type IntType struct {
+  BitWidth int8 `thrift:"bitWidth,1,required" db:"bitWidth" json:"bitWidth"`
+  IsSigned bool `thrift:"isSigned,2,required" db:"isSigned" json:"isSigned"`
+}
+
+func NewIntType() *IntType {
+  return &IntType{}
+}
+
+func (p *IntType) GetBitWidth() int8 { return p.BitWidth }
+func (p *IntType) GetIsSigned() bool { return p.IsSigned }
+
+// bitWidth travels as a thrift BYTE on the wire (ReadByte/WriteByte with an
+// int8 conversion); both fields are required and validated in Read.
+// [generated Read/Write/Equals/String bodies elided; standard pattern]
+// Embedded JSON logical type annotation
+//
+// Allowed for physical types: BINARY
+type JsonType struct{}
+
+func NewJsonType() *JsonType {
+  return &JsonType{}
+}
+
+// Embedded BSON logical type annotation
+//
+// Allowed for physical types: BINARY
+type BsonType struct{}
+
+func NewBsonType() *BsonType {
+  return &BsonType{}
+}
+
+// [generated Read/Write/Equals/String methods for JsonType and BsonType
+// elided; identical to the other empty annotation structs above]
+// +// Attributes: +// - STRING +// - MAP +// - LIST +// - ENUM +// - DECIMAL +// - DATE +// - TIME +// - TIMESTAMP +// - INTEGER +// - UNKNOWN +// - JSON +// - BSON +// - UUID +type LogicalType struct { + STRING *StringType `thrift:"STRING,1" db:"STRING" json:"STRING,omitempty"` + MAP *MapType `thrift:"MAP,2" db:"MAP" json:"MAP,omitempty"` + LIST *ListType `thrift:"LIST,3" db:"LIST" json:"LIST,omitempty"` + ENUM *EnumType `thrift:"ENUM,4" db:"ENUM" json:"ENUM,omitempty"` + DECIMAL *DecimalType `thrift:"DECIMAL,5" db:"DECIMAL" json:"DECIMAL,omitempty"` + DATE *DateType `thrift:"DATE,6" db:"DATE" json:"DATE,omitempty"` + TIME *TimeType `thrift:"TIME,7" db:"TIME" json:"TIME,omitempty"` + TIMESTAMP *TimestampType `thrift:"TIMESTAMP,8" db:"TIMESTAMP" json:"TIMESTAMP,omitempty"` + // unused field # 9 + INTEGER *IntType `thrift:"INTEGER,10" db:"INTEGER" json:"INTEGER,omitempty"` + UNKNOWN *NullType `thrift:"UNKNOWN,11" db:"UNKNOWN" json:"UNKNOWN,omitempty"` + JSON *JsonType `thrift:"JSON,12" db:"JSON" json:"JSON,omitempty"` + BSON *BsonType `thrift:"BSON,13" db:"BSON" json:"BSON,omitempty"` + UUID *UUIDType `thrift:"UUID,14" db:"UUID" json:"UUID,omitempty"` +} + +func NewLogicalType() *LogicalType { + return &LogicalType{} +} + +var LogicalType_STRING_DEFAULT *StringType +func (p *LogicalType) GetSTRING() *StringType { + if !p.IsSetSTRING() { + return LogicalType_STRING_DEFAULT + } +return p.STRING +} +var LogicalType_MAP_DEFAULT *MapType +func (p *LogicalType) GetMAP() *MapType { + if !p.IsSetMAP() { + return LogicalType_MAP_DEFAULT + } +return p.MAP +} +var LogicalType_LIST_DEFAULT *ListType +func (p *LogicalType) GetLIST() *ListType { + if !p.IsSetLIST() { + return LogicalType_LIST_DEFAULT + } +return p.LIST +} +var LogicalType_ENUM_DEFAULT *EnumType +func (p *LogicalType) GetENUM() *EnumType { + if !p.IsSetENUM() { + return LogicalType_ENUM_DEFAULT + } +return p.ENUM +} +var LogicalType_DECIMAL_DEFAULT *DecimalType +func (p *LogicalType) GetDECIMAL() *DecimalType { + if !p.IsSetDECIMAL() { + return LogicalType_DECIMAL_DEFAULT + } +return p.DECIMAL +} +var LogicalType_DATE_DEFAULT *DateType +func (p *LogicalType) GetDATE() *DateType { + if !p.IsSetDATE() { + return LogicalType_DATE_DEFAULT + } +return p.DATE +} +var LogicalType_TIME_DEFAULT *TimeType +func (p *LogicalType) GetTIME() *TimeType { + if !p.IsSetTIME() { + return LogicalType_TIME_DEFAULT + } +return p.TIME +} +var LogicalType_TIMESTAMP_DEFAULT *TimestampType +func (p *LogicalType) GetTIMESTAMP() *TimestampType { + if !p.IsSetTIMESTAMP() { + return LogicalType_TIMESTAMP_DEFAULT + } +return p.TIMESTAMP +} +var LogicalType_INTEGER_DEFAULT *IntType +func (p *LogicalType) GetINTEGER() *IntType { + if !p.IsSetINTEGER() { + return LogicalType_INTEGER_DEFAULT + } +return p.INTEGER +} +var LogicalType_UNKNOWN_DEFAULT *NullType +func (p *LogicalType) GetUNKNOWN() *NullType { + if !p.IsSetUNKNOWN() { + return LogicalType_UNKNOWN_DEFAULT + } +return p.UNKNOWN +} +var LogicalType_JSON_DEFAULT *JsonType +func (p *LogicalType) GetJSON() *JsonType { + if !p.IsSetJSON() { + return LogicalType_JSON_DEFAULT + } +return p.JSON +} +var LogicalType_BSON_DEFAULT *BsonType +func (p *LogicalType) GetBSON() *BsonType { + if !p.IsSetBSON() { + return LogicalType_BSON_DEFAULT + } +return p.BSON +} +var LogicalType_UUID_DEFAULT *UUIDType +func (p *LogicalType) GetUUID() *UUIDType { + if !p.IsSetUUID() { + return LogicalType_UUID_DEFAULT + } +return p.UUID +} +func (p *LogicalType) CountSetFieldsLogicalType() int { + count := 0 + if (p.IsSetSTRING()) { + 
count++ + } + if (p.IsSetMAP()) { + count++ + } + if (p.IsSetLIST()) { + count++ + } + if (p.IsSetENUM()) { + count++ + } + if (p.IsSetDECIMAL()) { + count++ + } + if (p.IsSetDATE()) { + count++ + } + if (p.IsSetTIME()) { + count++ + } + if (p.IsSetTIMESTAMP()) { + count++ + } + if (p.IsSetINTEGER()) { + count++ + } + if (p.IsSetUNKNOWN()) { + count++ + } + if (p.IsSetJSON()) { + count++ + } + if (p.IsSetBSON()) { + count++ + } + if (p.IsSetUUID()) { + count++ + } + return count + +} + +func (p *LogicalType) IsSetSTRING() bool { + return p.STRING != nil +} + +func (p *LogicalType) IsSetMAP() bool { + return p.MAP != nil +} + +func (p *LogicalType) IsSetLIST() bool { + return p.LIST != nil +} + +func (p *LogicalType) IsSetENUM() bool { + return p.ENUM != nil +} + +func (p *LogicalType) IsSetDECIMAL() bool { + return p.DECIMAL != nil +} + +func (p *LogicalType) IsSetDATE() bool { + return p.DATE != nil +} + +func (p *LogicalType) IsSetTIME() bool { + return p.TIME != nil +} + +func (p *LogicalType) IsSetTIMESTAMP() bool { + return p.TIMESTAMP != nil +} + +func (p *LogicalType) IsSetINTEGER() bool { + return p.INTEGER != nil +} + +func (p *LogicalType) IsSetUNKNOWN() bool { + return p.UNKNOWN != nil +} + +func (p *LogicalType) IsSetJSON() bool { + return p.JSON != nil +} + +func (p *LogicalType) IsSetBSON() bool { + return p.BSON != nil +} + +func (p *LogicalType) IsSetUUID() bool { + return p.UUID != nil +} + +func (p *LogicalType) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := 
p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 13: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField13(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 14: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField14(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *LogicalType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.STRING = &StringType{} + if err := p.STRING.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.STRING), err) + } + return nil +} + +func (p *LogicalType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.MAP = &MapType{} + if err := p.MAP.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MAP), err) + } + return nil +} + +func (p *LogicalType) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.LIST = &ListType{} + if err := p.LIST.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.LIST), err) + } + return nil +} + +func (p *LogicalType) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.ENUM = &EnumType{} + if err := p.ENUM.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENUM), err) + } + return nil +} + +func (p *LogicalType) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.DECIMAL = &DecimalType{} + if err := p.DECIMAL.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DECIMAL), err) + } + return nil +} + +func (p *LogicalType) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + p.DATE = &DateType{} + if err := p.DATE.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DATE), err) + } + return nil +} + +func (p *LogicalType) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + p.TIME = &TimeType{} + if err := p.TIME.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TIME), err) + } + return nil +} + +func (p *LogicalType) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + p.TIMESTAMP = &TimestampType{} + if err := p.TIMESTAMP.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TIMESTAMP), err) + } + return nil +} + +func (p *LogicalType) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + p.INTEGER = &IntType{} + if err 
:= p.INTEGER.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.INTEGER), err) + } + return nil +} + +func (p *LogicalType) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + p.UNKNOWN = &NullType{} + if err := p.UNKNOWN.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UNKNOWN), err) + } + return nil +} + +func (p *LogicalType) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + p.JSON = &JsonType{} + if err := p.JSON.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.JSON), err) + } + return nil +} + +func (p *LogicalType) ReadField13(ctx context.Context, iprot thrift.TProtocol) error { + p.BSON = &BsonType{} + if err := p.BSON.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BSON), err) + } + return nil +} + +func (p *LogicalType) ReadField14(ctx context.Context, iprot thrift.TProtocol) error { + p.UUID = &UUIDType{} + if err := p.UUID.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UUID), err) + } + return nil +} + +func (p *LogicalType) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsLogicalType(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "LogicalType"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + if err := p.writeField11(ctx, oprot); err != nil { return err } + if err := p.writeField12(ctx, oprot); err != nil { return err } + if err := p.writeField13(ctx, oprot); err != nil { return err } + if err := p.writeField14(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *LogicalType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSTRING() { + if err := oprot.WriteFieldBegin(ctx, "STRING", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:STRING: ", p), err) } + if err := p.STRING.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.STRING), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:STRING: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetMAP() { + if err := oprot.WriteFieldBegin(ctx, "MAP", thrift.STRUCT, 2); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:MAP: ", p), err) } + if err := p.MAP.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MAP), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:MAP: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetLIST() { + if err := oprot.WriteFieldBegin(ctx, "LIST", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:LIST: ", p), err) } + if err := p.LIST.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.LIST), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:LIST: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetENUM() { + if err := oprot.WriteFieldBegin(ctx, "ENUM", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ENUM: ", p), err) } + if err := p.ENUM.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENUM), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ENUM: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDECIMAL() { + if err := oprot.WriteFieldBegin(ctx, "DECIMAL", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:DECIMAL: ", p), err) } + if err := p.DECIMAL.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DECIMAL), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:DECIMAL: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDATE() { + if err := oprot.WriteFieldBegin(ctx, "DATE", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:DATE: ", p), err) } + if err := p.DATE.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DATE), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:DATE: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTIME() { + if err := oprot.WriteFieldBegin(ctx, "TIME", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:TIME: ", p), err) } + if err := p.TIME.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TIME), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:TIME: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTIMESTAMP() { + if err := oprot.WriteFieldBegin(ctx, "TIMESTAMP", thrift.STRUCT, 8); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 8:TIMESTAMP: ", p), err) } + if err := p.TIMESTAMP.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TIMESTAMP), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:TIMESTAMP: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetINTEGER() { + if err := oprot.WriteFieldBegin(ctx, "INTEGER", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:INTEGER: ", p), err) } + if err := p.INTEGER.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.INTEGER), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:INTEGER: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUNKNOWN() { + if err := oprot.WriteFieldBegin(ctx, "UNKNOWN", thrift.STRUCT, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:UNKNOWN: ", p), err) } + if err := p.UNKNOWN.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UNKNOWN), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:UNKNOWN: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetJSON() { + if err := oprot.WriteFieldBegin(ctx, "JSON", thrift.STRUCT, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:JSON: ", p), err) } + if err := p.JSON.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.JSON), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:JSON: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField13(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetBSON() { + if err := oprot.WriteFieldBegin(ctx, "BSON", thrift.STRUCT, 13); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:BSON: ", p), err) } + if err := p.BSON.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BSON), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 13:BSON: ", p), err) } + } + return err +} + +func (p *LogicalType) writeField14(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUUID() { + if err := oprot.WriteFieldBegin(ctx, "UUID", thrift.STRUCT, 14); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:UUID: ", p), err) } + if err := p.UUID.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UUID), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 14:UUID: ", p), err) } + } + return err +} + +func (p *LogicalType) Equals(other *LogicalType) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if 
!p.STRING.Equals(other.STRING) { return false } + if !p.MAP.Equals(other.MAP) { return false } + if !p.LIST.Equals(other.LIST) { return false } + if !p.ENUM.Equals(other.ENUM) { return false } + if !p.DECIMAL.Equals(other.DECIMAL) { return false } + if !p.DATE.Equals(other.DATE) { return false } + if !p.TIME.Equals(other.TIME) { return false } + if !p.TIMESTAMP.Equals(other.TIMESTAMP) { return false } + if !p.INTEGER.Equals(other.INTEGER) { return false } + if !p.UNKNOWN.Equals(other.UNKNOWN) { return false } + if !p.JSON.Equals(other.JSON) { return false } + if !p.BSON.Equals(other.BSON) { return false } + if !p.UUID.Equals(other.UUID) { return false } + return true +} + +func (p *LogicalType) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("LogicalType(%+v)", *p) +} + +// Represents an element inside a schema definition. +// - if it is a group (inner node) then type is undefined and num_children is defined +// - if it is a primitive type (leaf) then type is defined and num_children is undefined +// the nodes are listed in depth-first traversal order. +// +// Attributes: +// - Type: Data type for this field. Not set if the current element is a non-leaf node +// - TypeLength: If type is FIXED_LEN_BYTE_ARRAY, this is the byte length of the values. +// Otherwise, if specified, this is the maximum bit length to store any of the values. +// (e.g. a low cardinality INT col could have this set to 3). Note that this is +// in the schema, and therefore fixed for the entire file. +// - RepetitionType: repetition of the field. The root of the schema does not have a repetition_type. +// All other nodes must have one. +// - Name: Name of the field in the schema +// - NumChildren: Nested fields. Since thrift does not support nested fields, +// the nesting is flattened to a single list by a depth-first traversal. +// The children count is used to construct the nested relationship. +// This field is not set when the element is a primitive type. +// - ConvertedType: When the schema is the result of a conversion from another model. +// Used to record the original type to help with cross conversion. +// - Scale: Used when this column contains decimal data. +// See the DECIMAL converted type for more details. +// - Precision +// - FieldID: When the original schema supports field ids, this will save the +// original field id in the parquet schema. +// - LogicalType: The logical type of this SchemaElement +// +// LogicalType replaces ConvertedType, but ConvertedType is still required +// for some logical types to ensure forward-compatibility in format v1.
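LogicalType is a thrift union: Write above refuses to serialize the struct unless exactly one branch is set, which is what CountSetFieldsLogicalType counts. A short sketch of a valid union, and of the UTF8 string leaf described by the SchemaElement type defined next (the enum constants come from earlier in this generated file):

    // exampleStringLeaf builds an optional BYTE_ARRAY column annotated as a
    // UTF8 string, setting both LogicalType and the legacy ConvertedType as
    // the comment above requires. Illustrative only.
    func exampleStringLeaf(name string) *SchemaElement {
    	lt := &LogicalType{STRING: &StringType{}} // exactly one branch set
    	// lt.CountSetFieldsLogicalType() == 1; setting a second branch would
    	// make Write fail with "exactly one field must be set (2 set)".

    	typ := Type_BYTE_ARRAY
    	rep := FieldRepetitionType_OPTIONAL
    	ct := ConvertedType_UTF8
    	return &SchemaElement{
    		Type:           &typ,
    		RepetitionType: &rep,
    		Name:           name, // Name is the only required field
    		ConvertedType:  &ct,
    		LogicalType:    lt,
    	}
    }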
+type SchemaElement struct { + Type *Type `thrift:"type,1" db:"type" json:"type,omitempty"` + TypeLength *int32 `thrift:"type_length,2" db:"type_length" json:"type_length,omitempty"` + RepetitionType *FieldRepetitionType `thrift:"repetition_type,3" db:"repetition_type" json:"repetition_type,omitempty"` + Name string `thrift:"name,4,required" db:"name" json:"name"` + NumChildren *int32 `thrift:"num_children,5" db:"num_children" json:"num_children,omitempty"` + ConvertedType *ConvertedType `thrift:"converted_type,6" db:"converted_type" json:"converted_type,omitempty"` + Scale *int32 `thrift:"scale,7" db:"scale" json:"scale,omitempty"` + Precision *int32 `thrift:"precision,8" db:"precision" json:"precision,omitempty"` + FieldID *int32 `thrift:"field_id,9" db:"field_id" json:"field_id,omitempty"` + LogicalType *LogicalType `thrift:"logicalType,10" db:"logicalType" json:"logicalType,omitempty"` +} + +func NewSchemaElement() *SchemaElement { + return &SchemaElement{} +} + +var SchemaElement_Type_DEFAULT Type +func (p *SchemaElement) GetType() Type { + if !p.IsSetType() { + return SchemaElement_Type_DEFAULT + } +return *p.Type +} +var SchemaElement_TypeLength_DEFAULT int32 +func (p *SchemaElement) GetTypeLength() int32 { + if !p.IsSetTypeLength() { + return SchemaElement_TypeLength_DEFAULT + } +return *p.TypeLength +} +var SchemaElement_RepetitionType_DEFAULT FieldRepetitionType +func (p *SchemaElement) GetRepetitionType() FieldRepetitionType { + if !p.IsSetRepetitionType() { + return SchemaElement_RepetitionType_DEFAULT + } +return *p.RepetitionType +} + +func (p *SchemaElement) GetName() string { + return p.Name +} +var SchemaElement_NumChildren_DEFAULT int32 +func (p *SchemaElement) GetNumChildren() int32 { + if !p.IsSetNumChildren() { + return SchemaElement_NumChildren_DEFAULT + } +return *p.NumChildren +} +var SchemaElement_ConvertedType_DEFAULT ConvertedType +func (p *SchemaElement) GetConvertedType() ConvertedType { + if !p.IsSetConvertedType() { + return SchemaElement_ConvertedType_DEFAULT + } +return *p.ConvertedType +} +var SchemaElement_Scale_DEFAULT int32 +func (p *SchemaElement) GetScale() int32 { + if !p.IsSetScale() { + return SchemaElement_Scale_DEFAULT + } +return *p.Scale +} +var SchemaElement_Precision_DEFAULT int32 +func (p *SchemaElement) GetPrecision() int32 { + if !p.IsSetPrecision() { + return SchemaElement_Precision_DEFAULT + } +return *p.Precision +} +var SchemaElement_FieldID_DEFAULT int32 +func (p *SchemaElement) GetFieldID() int32 { + if !p.IsSetFieldID() { + return SchemaElement_FieldID_DEFAULT + } +return *p.FieldID +} +var SchemaElement_LogicalType_DEFAULT *LogicalType +func (p *SchemaElement) GetLogicalType() *LogicalType { + if !p.IsSetLogicalType() { + return SchemaElement_LogicalType_DEFAULT + } +return p.LogicalType +} +func (p *SchemaElement) IsSetType() bool { + return p.Type != nil +} + +func (p *SchemaElement) IsSetTypeLength() bool { + return p.TypeLength != nil +} + +func (p *SchemaElement) IsSetRepetitionType() bool { + return p.RepetitionType != nil +} + +func (p *SchemaElement) IsSetNumChildren() bool { + return p.NumChildren != nil +} + +func (p *SchemaElement) IsSetConvertedType() bool { + return p.ConvertedType != nil +} + +func (p *SchemaElement) IsSetScale() bool { + return p.Scale != nil +} + +func (p *SchemaElement) IsSetPrecision() bool { + return p.Precision != nil +} + +func (p *SchemaElement) IsSetFieldID() bool { + return p.FieldID != nil +} + +func (p *SchemaElement) IsSetLogicalType() bool { + return p.LogicalType != nil +} + +func (p 
*SchemaElement) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetName bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetName{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set")); + } + return nil +} + +func (p *SchemaElement) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := Type(v) + p.Type = &temp +} + return nil +} + +func (p *SchemaElement) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.TypeLength = &v +} + return nil +} + +func (p *SchemaElement) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := FieldRepetitionType(v) + p.RepetitionType = &temp +} + return nil +} + +func (p *SchemaElement) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.Name = v +} + return nil +} + +func (p *SchemaElement) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.NumChildren = &v +} + return nil +} + +func (p *SchemaElement) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + temp := ConvertedType(v) + p.ConvertedType = &temp +} + return nil +} + +func (p *SchemaElement) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.Scale = &v +} + return nil +} + +func (p *SchemaElement) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) +} else { + p.Precision = &v +} + return nil +} + +func (p *SchemaElement) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.FieldID = &v +} + return nil +} + +func (p *SchemaElement) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + p.LogicalType = &LogicalType{} + if err := p.LogicalType.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.LogicalType), err) + } + return nil +} + +func (p *SchemaElement) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SchemaElement"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SchemaElement) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetType() { + if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (1) 
field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTypeLength() { + if err := oprot.WriteFieldBegin(ctx, "type_length", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type_length: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.TypeLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type_length (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:type_length: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRepetitionType() { + if err := oprot.WriteFieldBegin(ctx, "repetition_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:repetition_type: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.RepetitionType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.repetition_type (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:repetition_type: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:name: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:name: ", p), err) } + return err +} + +func (p *SchemaElement) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetNumChildren() { + if err := oprot.WriteFieldBegin(ctx, "num_children", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_children: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.NumChildren)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_children (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:num_children: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetConvertedType() { + if err := oprot.WriteFieldBegin(ctx, "converted_type", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:converted_type: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.ConvertedType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.converted_type (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:converted_type: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetScale() { + if err := oprot.WriteFieldBegin(ctx, "scale", thrift.I32, 7); err != nil { 
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:scale: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.Scale)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.scale (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:scale: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetPrecision() { + if err := oprot.WriteFieldBegin(ctx, "precision", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:precision: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.Precision)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.precision (8) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:precision: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetFieldID() { + if err := oprot.WriteFieldBegin(ctx, "field_id", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:field_id: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.FieldID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.field_id (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:field_id: ", p), err) } + } + return err +} + +func (p *SchemaElement) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetLogicalType() { + if err := oprot.WriteFieldBegin(ctx, "logicalType", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:logicalType: ", p), err) } + if err := p.LogicalType.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.LogicalType), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:logicalType: ", p), err) } + } + return err +} + +func (p *SchemaElement) Equals(other *SchemaElement) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Type != other.Type { + if p.Type == nil || other.Type == nil { + return false + } + if (*p.Type) != (*other.Type) { return false } + } + if p.TypeLength != other.TypeLength { + if p.TypeLength == nil || other.TypeLength == nil { + return false + } + if (*p.TypeLength) != (*other.TypeLength) { return false } + } + if p.RepetitionType != other.RepetitionType { + if p.RepetitionType == nil || other.RepetitionType == nil { + return false + } + if (*p.RepetitionType) != (*other.RepetitionType) { return false } + } + if p.Name != other.Name { return false } + if p.NumChildren != other.NumChildren { + if p.NumChildren == nil || other.NumChildren == nil { + return false + } + if (*p.NumChildren) != (*other.NumChildren) { return false } + } + if p.ConvertedType != other.ConvertedType { + if p.ConvertedType == nil || other.ConvertedType == nil { + return false + } + if (*p.ConvertedType) != (*other.ConvertedType) { return false } + } + if p.Scale != other.Scale { + if p.Scale == nil || other.Scale == nil { + return false + } + if (*p.Scale) != (*other.Scale) { return false } + } + if p.Precision != 
other.Precision { + if p.Precision == nil || other.Precision == nil { + return false + } + if (*p.Precision) != (*other.Precision) { return false } + } + if p.FieldID != other.FieldID { + if p.FieldID == nil || other.FieldID == nil { + return false + } + if (*p.FieldID) != (*other.FieldID) { return false } + } + if !p.LogicalType.Equals(other.LogicalType) { return false } + return true +} + +func (p *SchemaElement) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SchemaElement(%+v)", *p) +} + +// Data page header +// +// Attributes: +// - NumValues: Number of values, including NULLs, in this data page. * +// - Encoding: Encoding used for this data page * +// - DefinitionLevelEncoding: Encoding used for definition levels * +// - RepetitionLevelEncoding: Encoding used for repetition levels * +// - Statistics: Optional statistics for the data in this page* +type DataPageHeader struct { + NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` + Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"` + DefinitionLevelEncoding Encoding `thrift:"definition_level_encoding,3,required" db:"definition_level_encoding" json:"definition_level_encoding"` + RepetitionLevelEncoding Encoding `thrift:"repetition_level_encoding,4,required" db:"repetition_level_encoding" json:"repetition_level_encoding"` + Statistics *Statistics `thrift:"statistics,5" db:"statistics" json:"statistics,omitempty"` +} + +func NewDataPageHeader() *DataPageHeader { + return &DataPageHeader{} +} + + +func (p *DataPageHeader) GetNumValues() int32 { + return p.NumValues +} + +func (p *DataPageHeader) GetEncoding() Encoding { + return p.Encoding +} + +func (p *DataPageHeader) GetDefinitionLevelEncoding() Encoding { + return p.DefinitionLevelEncoding +} + +func (p *DataPageHeader) GetRepetitionLevelEncoding() Encoding { + return p.RepetitionLevelEncoding +} +var DataPageHeader_Statistics_DEFAULT *Statistics +func (p *DataPageHeader) GetStatistics() *Statistics { + if !p.IsSetStatistics() { + return DataPageHeader_Statistics_DEFAULT + } +return p.Statistics +} +func (p *DataPageHeader) IsSetStatistics() bool { + return p.Statistics != nil +} + +func (p *DataPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNumValues bool = false; + var issetEncoding bool = false; + var issetDefinitionLevelEncoding bool = false; + var issetRepetitionLevelEncoding bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNumValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetEncoding = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetDefinitionLevelEncoding = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + 
if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetRepetitionLevelEncoding = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNumValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); + } + if !issetEncoding{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); + } + if !issetDefinitionLevelEncoding{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefinitionLevelEncoding is not set")); + } + if !issetRepetitionLevelEncoding{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RepetitionLevelEncoding is not set")); + } + return nil +} + +func (p *DataPageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.NumValues = v +} + return nil +} + +func (p *DataPageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := Encoding(v) + p.Encoding = temp +} + return nil +} + +func (p *DataPageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := Encoding(v) + p.DefinitionLevelEncoding = temp +} + return nil +} + +func (p *DataPageHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := Encoding(v) + p.RepetitionLevelEncoding = temp +} + return nil +} + +func (p *DataPageHeader) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.Statistics = &Statistics{} + if err := p.Statistics.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err) + } + return nil +} + +func (p *DataPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "DataPageHeader"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil 
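All four scalar fields of DataPageHeader are required: Read above rejects the struct with an INVALID_DATA protocol exception if any of the isset flags stays false, while Statistics may remain nil and is simply skipped on write. A minimal sketch, assuming Encoding_PLAIN and Encoding_RLE are among the Encoding constants generated earlier in this file:

    // examplePageHeader builds a v1 data page header with the required
    // fields populated; Statistics is optional and left nil. Illustrative only.
    func examplePageHeader() *DataPageHeader {
    	return &DataPageHeader{
    		NumValues:               1000, // includes NULLs
    		Encoding:                Encoding_PLAIN,
    		DefinitionLevelEncoding: Encoding_RLE,
    		RepetitionLevelEncoding: Encoding_RLE,
    	}
    }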
+} + +func (p *DataPageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } + return err +} + +func (p *DataPageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encoding: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) } + return err +} + +func (p *DataPageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "definition_level_encoding", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:definition_level_encoding: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.DefinitionLevelEncoding)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.definition_level_encoding (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:definition_level_encoding: ", p), err) } + return err +} + +func (p *DataPageHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "repetition_level_encoding", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:repetition_level_encoding: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.RepetitionLevelEncoding)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.repetition_level_encoding (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:repetition_level_encoding: ", p), err) } + return err +} + +func (p *DataPageHeader) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStatistics() { + if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:statistics: ", p), err) } + if err := p.Statistics.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:statistics: ", p), err) } + } + return err +} + +func (p *DataPageHeader) Equals(other *DataPageHeader) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.NumValues != other.NumValues { return false } + if p.Encoding != other.Encoding { return false } + if p.DefinitionLevelEncoding != other.DefinitionLevelEncoding { return false } + if p.RepetitionLevelEncoding != 
other.RepetitionLevelEncoding { return false } + if !p.Statistics.Equals(other.Statistics) { return false } + return true +} + +func (p *DataPageHeader) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DataPageHeader(%+v)", *p) +} + +type IndexPageHeader struct { +} + +func NewIndexPageHeader() *IndexPageHeader { + return &IndexPageHeader{} +} + +func (p *IndexPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IndexPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "IndexPageHeader"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *IndexPageHeader) Equals(other *IndexPageHeader) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + return true +} + +func (p *IndexPageHeader) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IndexPageHeader(%+v)", *p) +} + +// Attributes: +// - NumValues: Number of values in the dictionary * +// - Encoding: Encoding using this dictionary page * +// - IsSorted: If true, the entries in the dictionary are sorted in ascending order * +type DictionaryPageHeader struct { + NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` + Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"` + IsSorted *bool `thrift:"is_sorted,3" db:"is_sorted" json:"is_sorted,omitempty"` +} + +func NewDictionaryPageHeader() *DictionaryPageHeader { + return &DictionaryPageHeader{} +} + + +func (p *DictionaryPageHeader) GetNumValues() int32 { + return p.NumValues +} + +func (p *DictionaryPageHeader) GetEncoding() Encoding { + return p.Encoding +} +var DictionaryPageHeader_IsSorted_DEFAULT bool +func (p *DictionaryPageHeader) GetIsSorted() bool { + if !p.IsSetIsSorted() { + return DictionaryPageHeader_IsSorted_DEFAULT + } +return *p.IsSorted +} +func (p *DictionaryPageHeader) IsSetIsSorted() bool { + return p.IsSorted != nil +} + +func (p *DictionaryPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNumValues bool = false; + var issetEncoding bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err 
:= p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNumValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetEncoding = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNumValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); + } + if !issetEncoding{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); + } + return nil +} + +func (p *DictionaryPageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.NumValues = v +} + return nil +} + +func (p *DictionaryPageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := Encoding(v) + p.Encoding = temp +} + return nil +} + +func (p *DictionaryPageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.IsSorted = &v +} + return nil +} + +func (p *DictionaryPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "DictionaryPageHeader"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *DictionaryPageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } + return err +} + +func (p *DictionaryPageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:encoding: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) } + return err +} + +func (p *DictionaryPageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIsSorted() { + if err := oprot.WriteFieldBegin(ctx, "is_sorted", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_sorted: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.IsSorted)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_sorted (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_sorted: ", p), err) } + } + return err +} + +func (p *DictionaryPageHeader) Equals(other *DictionaryPageHeader) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.NumValues != other.NumValues { return false } + if p.Encoding != other.Encoding { return false } + if p.IsSorted != other.IsSorted { + if p.IsSorted == nil || other.IsSorted == nil { + return false + } + if (*p.IsSorted) != (*other.IsSorted) { return false } + } + return true +} + +func (p *DictionaryPageHeader) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DictionaryPageHeader(%+v)", *p) +} + +// New page format allowing reading levels without decompressing the data +// Repetition and definition levels are uncompressed +// The remaining section containing the data is compressed if is_compressed is true +// +// +// Attributes: +// - NumValues: Number of values, including NULLs, in this data page. * +// - NumNulls: Number of NULL values, in this data page. +// Number of non-null = num_values - num_nulls which is also the number of values in the data section * +// - NumRows: Number of rows in this data page. which means pages change on record boundaries (r = 0) * +// - Encoding: Encoding used for data in this page * +// - DefinitionLevelsByteLength: length of the definition levels +// - RepetitionLevelsByteLength: length of the repetition levels +// - IsCompressed: whether the values are compressed. +// Which means the section of the page between +// definition_levels_byte_length + repetition_levels_byte_length + 1 and compressed_page_size (included) +// is compressed with the compression_codec. 
+// If missing it is considered compressed +// - Statistics: optional statistics for the data in this page * +type DataPageHeaderV2 struct { + NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` + NumNulls int32 `thrift:"num_nulls,2,required" db:"num_nulls" json:"num_nulls"` + NumRows int32 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` + Encoding Encoding `thrift:"encoding,4,required" db:"encoding" json:"encoding"` + DefinitionLevelsByteLength int32 `thrift:"definition_levels_byte_length,5,required" db:"definition_levels_byte_length" json:"definition_levels_byte_length"` + RepetitionLevelsByteLength int32 `thrift:"repetition_levels_byte_length,6,required" db:"repetition_levels_byte_length" json:"repetition_levels_byte_length"` + IsCompressed bool `thrift:"is_compressed,7" db:"is_compressed" json:"is_compressed"` + Statistics *Statistics `thrift:"statistics,8" db:"statistics" json:"statistics,omitempty"` +} + +func NewDataPageHeaderV2() *DataPageHeaderV2 { + return &DataPageHeaderV2{ +IsCompressed: true, +} +} + + +func (p *DataPageHeaderV2) GetNumValues() int32 { + return p.NumValues +} + +func (p *DataPageHeaderV2) GetNumNulls() int32 { + return p.NumNulls +} + +func (p *DataPageHeaderV2) GetNumRows() int32 { + return p.NumRows +} + +func (p *DataPageHeaderV2) GetEncoding() Encoding { + return p.Encoding +} + +func (p *DataPageHeaderV2) GetDefinitionLevelsByteLength() int32 { + return p.DefinitionLevelsByteLength +} + +func (p *DataPageHeaderV2) GetRepetitionLevelsByteLength() int32 { + return p.RepetitionLevelsByteLength +} +var DataPageHeaderV2_IsCompressed_DEFAULT bool = true + +func (p *DataPageHeaderV2) GetIsCompressed() bool { + return p.IsCompressed +} +var DataPageHeaderV2_Statistics_DEFAULT *Statistics +func (p *DataPageHeaderV2) GetStatistics() *Statistics { + if !p.IsSetStatistics() { + return DataPageHeaderV2_Statistics_DEFAULT + } +return p.Statistics +} +func (p *DataPageHeaderV2) IsSetIsCompressed() bool { + return p.IsCompressed != DataPageHeaderV2_IsCompressed_DEFAULT +} + +func (p *DataPageHeaderV2) IsSetStatistics() bool { + return p.Statistics != nil +} + +func (p *DataPageHeaderV2) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNumValues bool = false; + var issetNumNulls bool = false; + var issetNumRows bool = false; + var issetEncoding bool = false; + var issetDefinitionLevelsByteLength bool = false; + var issetRepetitionLevelsByteLength bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNumValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNumNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetNumRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == 
thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetEncoding = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + issetDefinitionLevelsByteLength = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + issetRepetitionLevelsByteLength = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNumValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); + } + if !issetNumNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumNulls is not set")); + } + if !issetNumRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); + } + if !issetEncoding{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); + } + if !issetDefinitionLevelsByteLength{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefinitionLevelsByteLength is not set")); + } + if !issetRepetitionLevelsByteLength{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RepetitionLevelsByteLength is not set")); + } + return nil +} + +func (p *DataPageHeaderV2) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.NumValues = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.NumNulls = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.NumRows = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := Encoding(v) + p.Encoding = temp +} + return nil +} + +func (p *DataPageHeaderV2) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + 
p.DefinitionLevelsByteLength = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.RepetitionLevelsByteLength = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.IsCompressed = v +} + return nil +} + +func (p *DataPageHeaderV2) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + p.Statistics = &Statistics{} + if err := p.Statistics.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err) + } + return nil +} + +func (p *DataPageHeaderV2) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "DataPageHeaderV2"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *DataPageHeaderV2) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_nulls", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:num_nulls: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumNulls)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:num_nulls: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } + if err := 
oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:encoding: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.encoding (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:encoding: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "definition_levels_byte_length", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:definition_levels_byte_length: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.DefinitionLevelsByteLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.definition_levels_byte_length (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:definition_levels_byte_length: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "repetition_levels_byte_length", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:repetition_levels_byte_length: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.RepetitionLevelsByteLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.repetition_levels_byte_length (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:repetition_levels_byte_length: ", p), err) } + return err +} + +func (p *DataPageHeaderV2) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIsCompressed() { + if err := oprot.WriteFieldBegin(ctx, "is_compressed", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_compressed: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.IsCompressed)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_compressed (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_compressed: ", p), err) } + } + return err +} + +func (p *DataPageHeaderV2) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStatistics() { + if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:statistics: ", p), err) } + if err := p.Statistics.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:statistics: ", p), err) } + } + return err +} + +func (p *DataPageHeaderV2) Equals(other *DataPageHeaderV2) bool { + if p == other { + return true + } else if p == nil || other == nil { + return 
false + } + if p.NumValues != other.NumValues { return false } + if p.NumNulls != other.NumNulls { return false } + if p.NumRows != other.NumRows { return false } + if p.Encoding != other.Encoding { return false } + if p.DefinitionLevelsByteLength != other.DefinitionLevelsByteLength { return false } + if p.RepetitionLevelsByteLength != other.RepetitionLevelsByteLength { return false } + if p.IsCompressed != other.IsCompressed { return false } + if !p.Statistics.Equals(other.Statistics) { return false } + return true +} + +func (p *DataPageHeaderV2) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DataPageHeaderV2(%+v)", *p) +} + +// Block-based algorithm type annotation. * +type SplitBlockAlgorithm struct { +} + +func NewSplitBlockAlgorithm() *SplitBlockAlgorithm { + return &SplitBlockAlgorithm{} +} + +func (p *SplitBlockAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SplitBlockAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SplitBlockAlgorithm"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SplitBlockAlgorithm) Equals(other *SplitBlockAlgorithm) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + return true +} + +func (p *SplitBlockAlgorithm) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SplitBlockAlgorithm(%+v)", *p) +} + +// The algorithm used in the Bloom filter. * +// +// Attributes: +// - BLOCK: Block-based Bloom filter.
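+// (Editor's note: a hedged construction sketch, not generated code. The format
+// currently defines exactly one variant per Bloom filter union, so a header is
+// typically built like this, where numBytes is a hypothetical bitset size:
+//
+//	hdr := &BloomFilterHeader{
+//		NumBytes:    numBytes,
+//		Algorithm:   &BloomFilterAlgorithm{BLOCK: NewSplitBlockAlgorithm()},
+//		Hash:        &BloomFilterHash{XXHASH: NewXxHash()},
+//		Compression: &BloomFilterCompression{UNCOMPRESSED: NewUncompressed()},
+//	}
+//
+// Each union's Write rejects anything other than exactly one field set.)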
* +type BloomFilterAlgorithm struct { + BLOCK *SplitBlockAlgorithm `thrift:"BLOCK,1" db:"BLOCK" json:"BLOCK,omitempty"` +} + +func NewBloomFilterAlgorithm() *BloomFilterAlgorithm { + return &BloomFilterAlgorithm{} +} + +var BloomFilterAlgorithm_BLOCK_DEFAULT *SplitBlockAlgorithm +func (p *BloomFilterAlgorithm) GetBLOCK() *SplitBlockAlgorithm { + if !p.IsSetBLOCK() { + return BloomFilterAlgorithm_BLOCK_DEFAULT + } +return p.BLOCK +} +func (p *BloomFilterAlgorithm) CountSetFieldsBloomFilterAlgorithm() int { + count := 0 + if (p.IsSetBLOCK()) { + count++ + } + return count + +} + +func (p *BloomFilterAlgorithm) IsSetBLOCK() bool { + return p.BLOCK != nil +} + +func (p *BloomFilterAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BloomFilterAlgorithm) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.BLOCK = &SplitBlockAlgorithm{} + if err := p.BLOCK.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BLOCK), err) + } + return nil +} + +func (p *BloomFilterAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsBloomFilterAlgorithm(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "BloomFilterAlgorithm"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BloomFilterAlgorithm) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetBLOCK() { + if err := oprot.WriteFieldBegin(ctx, "BLOCK", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:BLOCK: ", p), err) } + if err := p.BLOCK.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BLOCK), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:BLOCK: ", p), err) } + } + return err +} + +func (p *BloomFilterAlgorithm) Equals(other *BloomFilterAlgorithm) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.BLOCK.Equals(other.BLOCK) { return false } + return true +} + +func (p *BloomFilterAlgorithm) String() string { + 
if p == nil { + return "" + } + return fmt.Sprintf("BloomFilterAlgorithm(%+v)", *p) +} + +// Hash strategy type annotation. xxHash is an extremely fast non-cryptographic hash +// algorithm. It uses the 64-bit version of xxHash. +// +type XxHash struct { +} + +func NewXxHash() *XxHash { + return &XxHash{} +} + +func (p *XxHash) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *XxHash) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "XxHash"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *XxHash) Equals(other *XxHash) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + return true +} + +func (p *XxHash) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("XxHash(%+v)", *p) +} + +// The hash function used in the Bloom filter. This function takes the hash of a column value +// using plain encoding. +// +// +// Attributes: +// - XXHASH: xxHash Strategy.
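+// (Editor's note: a hedged sketch of the hashing step, assuming the
+// github.com/cespare/xxhash/v2 package already in this module's dependency
+// graph, with plainBytes standing in for the plain-encoded column value:
+//
+//	h := xxhash.Sum64(plainBytes) // the required 64-bit xxHash
+//
+// Per the split-block design, the upper 32 bits of h pick the block and the
+// lower 32 bits, combined with fixed salts, set one bit per 32-bit word.)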
* +type BloomFilterHash struct { + XXHASH *XxHash `thrift:"XXHASH,1" db:"XXHASH" json:"XXHASH,omitempty"` +} + +func NewBloomFilterHash() *BloomFilterHash { + return &BloomFilterHash{} +} + +var BloomFilterHash_XXHASH_DEFAULT *XxHash +func (p *BloomFilterHash) GetXXHASH() *XxHash { + if !p.IsSetXXHASH() { + return BloomFilterHash_XXHASH_DEFAULT + } +return p.XXHASH +} +func (p *BloomFilterHash) CountSetFieldsBloomFilterHash() int { + count := 0 + if (p.IsSetXXHASH()) { + count++ + } + return count + +} + +func (p *BloomFilterHash) IsSetXXHASH() bool { + return p.XXHASH != nil +} + +func (p *BloomFilterHash) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BloomFilterHash) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.XXHASH = &XxHash{} + if err := p.XXHASH.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.XXHASH), err) + } + return nil +} + +func (p *BloomFilterHash) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsBloomFilterHash(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "BloomFilterHash"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BloomFilterHash) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetXXHASH() { + if err := oprot.WriteFieldBegin(ctx, "XXHASH", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:XXHASH: ", p), err) } + if err := p.XXHASH.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.XXHASH), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:XXHASH: ", p), err) } + } + return err +} + +func (p *BloomFilterHash) Equals(other *BloomFilterHash) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.XXHASH.Equals(other.XXHASH) { return false } + return true +} + +func (p *BloomFilterHash) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BloomFilterHash(%+v)", *p) +} + +// The compression used in the Bloom 
filter. +// +type Uncompressed struct { +} + +func NewUncompressed() *Uncompressed { + return &Uncompressed{} +} + +func (p *Uncompressed) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Uncompressed) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Uncompressed"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Uncompressed) Equals(other *Uncompressed) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + return true +} + +func (p *Uncompressed) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Uncompressed(%+v)", *p) +} + +// Attributes: +// - UNCOMPRESSED +type BloomFilterCompression struct { + UNCOMPRESSED *Uncompressed `thrift:"UNCOMPRESSED,1" db:"UNCOMPRESSED" json:"UNCOMPRESSED,omitempty"` +} + +func NewBloomFilterCompression() *BloomFilterCompression { + return &BloomFilterCompression{} +} + +var BloomFilterCompression_UNCOMPRESSED_DEFAULT *Uncompressed +func (p *BloomFilterCompression) GetUNCOMPRESSED() *Uncompressed { + if !p.IsSetUNCOMPRESSED() { + return BloomFilterCompression_UNCOMPRESSED_DEFAULT + } +return p.UNCOMPRESSED +} +func (p *BloomFilterCompression) CountSetFieldsBloomFilterCompression() int { + count := 0 + if (p.IsSetUNCOMPRESSED()) { + count++ + } + return count + +} + +func (p *BloomFilterCompression) IsSetUNCOMPRESSED() bool { + return p.UNCOMPRESSED != nil +} + +func (p *BloomFilterCompression) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BloomFilterCompression) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.UNCOMPRESSED = &Uncompressed{} + if err := 
p.UNCOMPRESSED.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UNCOMPRESSED), err) + } + return nil +} + +func (p *BloomFilterCompression) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsBloomFilterCompression(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "BloomFilterCompression"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BloomFilterCompression) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUNCOMPRESSED() { + if err := oprot.WriteFieldBegin(ctx, "UNCOMPRESSED", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:UNCOMPRESSED: ", p), err) } + if err := p.UNCOMPRESSED.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UNCOMPRESSED), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:UNCOMPRESSED: ", p), err) } + } + return err +} + +func (p *BloomFilterCompression) Equals(other *BloomFilterCompression) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.UNCOMPRESSED.Equals(other.UNCOMPRESSED) { return false } + return true +} + +func (p *BloomFilterCompression) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BloomFilterCompression(%+v)", *p) +} + +// The Bloom filter header is stored at the beginning of the Bloom filter data of each column +// and is followed by its bitset. +// +// +// Attributes: +// - NumBytes: The size of the bitset in bytes * +// - Algorithm: The algorithm for setting bits. * +// - Hash: The hash function used for the Bloom filter.
* +// - Compression: The compression used in the Bloom filter * +type BloomFilterHeader struct { + NumBytes int32 `thrift:"numBytes,1,required" db:"numBytes" json:"numBytes"` + Algorithm *BloomFilterAlgorithm `thrift:"algorithm,2,required" db:"algorithm" json:"algorithm"` + Hash *BloomFilterHash `thrift:"hash,3,required" db:"hash" json:"hash"` + Compression *BloomFilterCompression `thrift:"compression,4,required" db:"compression" json:"compression"` +} + +func NewBloomFilterHeader() *BloomFilterHeader { + return &BloomFilterHeader{} +} + + +func (p *BloomFilterHeader) GetNumBytes() int32 { + return p.NumBytes +} +var BloomFilterHeader_Algorithm_DEFAULT *BloomFilterAlgorithm +func (p *BloomFilterHeader) GetAlgorithm() *BloomFilterAlgorithm { + if !p.IsSetAlgorithm() { + return BloomFilterHeader_Algorithm_DEFAULT + } +return p.Algorithm +} +var BloomFilterHeader_Hash_DEFAULT *BloomFilterHash +func (p *BloomFilterHeader) GetHash() *BloomFilterHash { + if !p.IsSetHash() { + return BloomFilterHeader_Hash_DEFAULT + } +return p.Hash +} +var BloomFilterHeader_Compression_DEFAULT *BloomFilterCompression +func (p *BloomFilterHeader) GetCompression() *BloomFilterCompression { + if !p.IsSetCompression() { + return BloomFilterHeader_Compression_DEFAULT + } +return p.Compression +} +func (p *BloomFilterHeader) IsSetAlgorithm() bool { + return p.Algorithm != nil +} + +func (p *BloomFilterHeader) IsSetHash() bool { + return p.Hash != nil +} + +func (p *BloomFilterHeader) IsSetCompression() bool { + return p.Compression != nil +} + +func (p *BloomFilterHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNumBytes bool = false; + var issetAlgorithm bool = false; + var issetHash bool = false; + var issetCompression bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNumBytes = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetAlgorithm = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetHash = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetCompression = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNumBytes{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumBytes is not set")); + } + if !issetAlgorithm{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field Algorithm is not set")); + } + if !issetHash{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Hash is not set")); + } + if !issetCompression{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Compression is not set")); + } + return nil +} + +func (p *BloomFilterHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.NumBytes = v +} + return nil +} + +func (p *BloomFilterHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.Algorithm = &BloomFilterAlgorithm{} + if err := p.Algorithm.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Algorithm), err) + } + return nil +} + +func (p *BloomFilterHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Hash = &BloomFilterHash{} + if err := p.Hash.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Hash), err) + } + return nil +} + +func (p *BloomFilterHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Compression = &BloomFilterCompression{} + if err := p.Compression.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Compression), err) + } + return nil +} + +func (p *BloomFilterHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BloomFilterHeader"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BloomFilterHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "numBytes", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:numBytes: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.NumBytes)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.numBytes (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:numBytes: ", p), err) } + return err +} + +func (p *BloomFilterHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "algorithm", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:algorithm: ", p), err) } + if err := p.Algorithm.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Algorithm), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:algorithm: ", p), err) } + return err +} + +func (p *BloomFilterHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) 
{ + if err := oprot.WriteFieldBegin(ctx, "hash", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hash: ", p), err) } + if err := p.Hash.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Hash), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hash: ", p), err) } + return err +} + +func (p *BloomFilterHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "compression", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:compression: ", p), err) } + if err := p.Compression.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Compression), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:compression: ", p), err) } + return err +} + +func (p *BloomFilterHeader) Equals(other *BloomFilterHeader) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.NumBytes != other.NumBytes { return false } + if !p.Algorithm.Equals(other.Algorithm) { return false } + if !p.Hash.Equals(other.Hash) { return false } + if !p.Compression.Equals(other.Compression) { return false } + return true +} + +func (p *BloomFilterHeader) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BloomFilterHeader(%+v)", *p) +} + +// Attributes: +// - Type: the type of the page: indicates which of the *_header fields is set * +// - UncompressedPageSize: Uncompressed page size in bytes (not including this header) * +// - CompressedPageSize: Compressed (and potentially encrypted) page size in bytes, not including this header * +// - Crc: The 32bit CRC for the page, to be be calculated as follows: +// - Using the standard CRC32 algorithm +// - On the data only, i.e. this header should not be included. 'Data' +// hereby refers to the concatenation of the repetition levels, the +// definition levels and the column value, in this exact order. +// - On the encoded versions of the repetition levels, definition levels and +// column values +// - On the compressed versions of the repetition levels, definition levels +// and column values where possible; +// - For v1 data pages, the repetition levels, definition levels and column +// values are always compressed together. If a compression scheme is +// specified, the CRC shall be calculated on the compressed version of +// this concatenation. If no compression scheme is specified, the CRC +// shall be calculated on the uncompressed version of this concatenation. +// - For v2 data pages, the repetition levels and definition levels are +// handled separately from the data and are never compressed (only +// encoded). If a compression scheme is specified, the CRC shall be +// calculated on the concatenation of the uncompressed repetition levels, +// uncompressed definition levels and the compressed column values. +// If no compression scheme is specified, the CRC shall be calculated on +// the uncompressed concatenation. +// - In encrypted columns, CRC is calculated after page encryption; the +// encryption itself is performed after page compression (if compressed) +// If enabled, this allows for disabling checksumming in HDFS if only a few +// pages need to be read. 
+// +// - DataPageHeader +// - IndexPageHeader +// - DictionaryPageHeader +// - DataPageHeaderV2 +type PageHeader struct { + Type PageType `thrift:"type,1,required" db:"type" json:"type"` + UncompressedPageSize int32 `thrift:"uncompressed_page_size,2,required" db:"uncompressed_page_size" json:"uncompressed_page_size"` + CompressedPageSize int32 `thrift:"compressed_page_size,3,required" db:"compressed_page_size" json:"compressed_page_size"` + Crc *int32 `thrift:"crc,4" db:"crc" json:"crc,omitempty"` + DataPageHeader *DataPageHeader `thrift:"data_page_header,5" db:"data_page_header" json:"data_page_header,omitempty"` + IndexPageHeader *IndexPageHeader `thrift:"index_page_header,6" db:"index_page_header" json:"index_page_header,omitempty"` + DictionaryPageHeader *DictionaryPageHeader `thrift:"dictionary_page_header,7" db:"dictionary_page_header" json:"dictionary_page_header,omitempty"` + DataPageHeaderV2 *DataPageHeaderV2 `thrift:"data_page_header_v2,8" db:"data_page_header_v2" json:"data_page_header_v2,omitempty"` +} + +func NewPageHeader() *PageHeader { + return &PageHeader{} +} + + +func (p *PageHeader) GetType() PageType { + return p.Type +} + +func (p *PageHeader) GetUncompressedPageSize() int32 { + return p.UncompressedPageSize +} + +func (p *PageHeader) GetCompressedPageSize() int32 { + return p.CompressedPageSize +} +var PageHeader_Crc_DEFAULT int32 +func (p *PageHeader) GetCrc() int32 { + if !p.IsSetCrc() { + return PageHeader_Crc_DEFAULT + } +return *p.Crc +} +var PageHeader_DataPageHeader_DEFAULT *DataPageHeader +func (p *PageHeader) GetDataPageHeader() *DataPageHeader { + if !p.IsSetDataPageHeader() { + return PageHeader_DataPageHeader_DEFAULT + } +return p.DataPageHeader +} +var PageHeader_IndexPageHeader_DEFAULT *IndexPageHeader +func (p *PageHeader) GetIndexPageHeader() *IndexPageHeader { + if !p.IsSetIndexPageHeader() { + return PageHeader_IndexPageHeader_DEFAULT + } +return p.IndexPageHeader +} +var PageHeader_DictionaryPageHeader_DEFAULT *DictionaryPageHeader +func (p *PageHeader) GetDictionaryPageHeader() *DictionaryPageHeader { + if !p.IsSetDictionaryPageHeader() { + return PageHeader_DictionaryPageHeader_DEFAULT + } +return p.DictionaryPageHeader +} +var PageHeader_DataPageHeaderV2_DEFAULT *DataPageHeaderV2 +func (p *PageHeader) GetDataPageHeaderV2() *DataPageHeaderV2 { + if !p.IsSetDataPageHeaderV2() { + return PageHeader_DataPageHeaderV2_DEFAULT + } +return p.DataPageHeaderV2 +} +func (p *PageHeader) IsSetCrc() bool { + return p.Crc != nil +} + +func (p *PageHeader) IsSetDataPageHeader() bool { + return p.DataPageHeader != nil +} + +func (p *PageHeader) IsSetIndexPageHeader() bool { + return p.IndexPageHeader != nil +} + +func (p *PageHeader) IsSetDictionaryPageHeader() bool { + return p.DictionaryPageHeader != nil +} + +func (p *PageHeader) IsSetDataPageHeaderV2() bool { + return p.DataPageHeaderV2 != nil +} + +func (p *PageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetType bool = false; + var issetUncompressedPageSize bool = false; + var issetCompressedPageSize bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + 
return err + } + issetType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetUncompressedPageSize = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetCompressedPageSize = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set")); + } + if !issetUncompressedPageSize{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field UncompressedPageSize is not set")); + } + if !issetCompressedPageSize{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field CompressedPageSize is not set")); + } + return nil +} + +func (p *PageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := PageType(v) + p.Type = temp +} + return nil +} + +func (p *PageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.UncompressedPageSize = v +} + return nil +} + +func (p *PageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.CompressedPageSize = v +} + return nil +} + +func (p *PageHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.Crc = &v +} + return nil +} + +func (p *PageHeader) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.DataPageHeader = &DataPageHeader{} + if err := p.DataPageHeader.Read(ctx, iprot); err != 
nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DataPageHeader), err)
+ }
+ return nil
+}
+
+func (p *PageHeader) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ p.IndexPageHeader = &IndexPageHeader{}
+ if err := p.IndexPageHeader.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.IndexPageHeader), err)
+ }
+ return nil
+}
+
+func (p *PageHeader) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+ p.DictionaryPageHeader = &DictionaryPageHeader{}
+ if err := p.DictionaryPageHeader.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DictionaryPageHeader), err)
+ }
+ return nil
+}
+
+func (p *PageHeader) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+ p.DataPageHeaderV2 = &DataPageHeaderV2{
+ IsCompressed: true,
+}
+ if err := p.DataPageHeaderV2.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DataPageHeaderV2), err)
+ }
+ return nil
+}
+
+func (p *PageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "PageHeader"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *PageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Type)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) }
+ return err
+}
+
+func (p *PageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "uncompressed_page_size", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:uncompressed_page_size: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.UncompressedPageSize)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.uncompressed_page_size (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:uncompressed_page_size: ", p), err) }
+ return err
+}
+
+func (p *PageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "compressed_page_size", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:compressed_page_size: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.CompressedPageSize)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:compressed_page_size: ", p), err) }
+ return err
+}
+
+func (p *PageHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetCrc() {
+ if err := oprot.WriteFieldBegin(ctx, "crc", thrift.I32, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:crc: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(*p.Crc)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.crc (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:crc: ", p), err) }
+ }
+ return err
+}
+
+func (p *PageHeader) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDataPageHeader() {
+ if err := oprot.WriteFieldBegin(ctx, "data_page_header", thrift.STRUCT, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:data_page_header: ", p), err) }
+ if err := p.DataPageHeader.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DataPageHeader), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:data_page_header: ", p), err) }
+ }
+ return err
+}
+
+func (p *PageHeader) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIndexPageHeader() {
+ if err := oprot.WriteFieldBegin(ctx, "index_page_header", thrift.STRUCT, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:index_page_header: ", p), err) }
+ if err := p.IndexPageHeader.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.IndexPageHeader), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:index_page_header: ", p), err) }
+ }
+ return err
+}
+
+func (p *PageHeader) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDictionaryPageHeader() {
+ if err := oprot.WriteFieldBegin(ctx, "dictionary_page_header", thrift.STRUCT, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:dictionary_page_header: ", p), err) }
+ if err := p.DictionaryPageHeader.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DictionaryPageHeader), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:dictionary_page_header: ", p), err) }
+ }
+ return err
+}
+
+func (p *PageHeader) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDataPageHeaderV2() {
+ if err := oprot.WriteFieldBegin(ctx, "data_page_header_v2", thrift.STRUCT, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:data_page_header_v2: ", p), err) }
+ if err := p.DataPageHeaderV2.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DataPageHeaderV2), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:data_page_header_v2: ", p), err) }
+ }
+ return err
+}
+
+func (p *PageHeader) Equals(other *PageHeader) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Type != other.Type { return false }
+ if p.UncompressedPageSize != other.UncompressedPageSize { return false }
+ if p.CompressedPageSize != other.CompressedPageSize { return false }
+ if p.Crc != other.Crc {
+ if p.Crc == nil || other.Crc == nil {
+ return false
+ }
+ if (*p.Crc) != (*other.Crc) { return false }
+ }
+ if !p.DataPageHeader.Equals(other.DataPageHeader) { return false }
+ if !p.IndexPageHeader.Equals(other.IndexPageHeader) { return false }
+ if !p.DictionaryPageHeader.Equals(other.DictionaryPageHeader) { return false }
+ if !p.DataPageHeaderV2.Equals(other.DataPageHeaderV2) { return false }
+ return true
+}
+
+func (p *PageHeader) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("PageHeader(%+v)", *p)
+}
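// Illustrative sketch, not part of the generated file: Parquet serializes page
// headers with Thrift's compact protocol, so decoding raw header bytes is just a
// Read against a TCompactProtocol over an in-memory transport. Assumes the
// thrift v0.14+ TConfiguration API from the vendored github.com/apache/thrift.
func decodePageHeader(ctx context.Context, raw []byte) (*PageHeader, error) {
  buf := thrift.NewTMemoryBuffer()
  if _, err := buf.Write(raw); err != nil {
    return nil, err
  }
  proto := thrift.NewTCompactProtocolConf(buf, &thrift.TConfiguration{})
  hdr := &PageHeader{}
  if err := hdr.Read(ctx, proto); err != nil {
    return nil, err
  }
  return hdr, nil
}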
+
+// Wrapper struct to store key values
+//
+// Attributes:
+// - Key
+// - Value
+type KeyValue struct {
+ Key string `thrift:"key,1,required" db:"key" json:"key"`
+ Value *string `thrift:"value,2" db:"value" json:"value,omitempty"`
+}
+
+func NewKeyValue() *KeyValue {
+ return &KeyValue{}
+}
+
+
+func (p *KeyValue) GetKey() string {
+ return p.Key
+}
+var KeyValue_Value_DEFAULT string
+func (p *KeyValue) GetValue() string {
+ if !p.IsSetValue() {
+ return KeyValue_Value_DEFAULT
+ }
+return *p.Value
+}
+func (p *KeyValue) IsSetValue() bool {
+ return p.Value != nil
+}
+
+func (p *KeyValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetKey bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetKey = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetKey{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
+ }
+ return nil
+}
+
+func (p *KeyValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Key = v
+}
+ return nil
+}
+
+func (p *KeyValue) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Value = &v
+}
+ return nil
+}
+
+func (p *KeyValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "KeyValue"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *KeyValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+ return err
+}
+
+func (p *KeyValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetValue() {
+ if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.Value)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+ }
+ return err
+}
+
+func (p *KeyValue) Equals(other *KeyValue) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Key != other.Key { return false }
+ if p.Value != other.Value {
+ if p.Value == nil || other.Value == nil {
+ return false
+ }
+ if (*p.Value) != (*other.Value) { return false }
+ }
+ return true
+}
+
+func (p *KeyValue) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("KeyValue(%+v)", *p)
+}
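// Illustrative sketch, not part of the generated file: the required/optional
// split in practice. Key is a plain string whose presence is enforced by the
// issetKey check in Read, while Value is a *string so an absent value can be
// told apart from an empty one; GetValue falls back to KeyValue_Value_DEFAULT.
func exampleKeyValue() *KeyValue {
  v := "zstd" // hypothetical metadata value
  kv := &KeyValue{Key: "compression", Value: &v}
  _ = kv.GetValue() // "zstd"; would be "" (the default) if Value were nil
  return kv
}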
+
+// Wrapper struct to specify sort order
+//
+// Attributes:
+// - ColumnIdx: The column index (in this row group) *
+// - Descending: If true, indicates this column is sorted in descending order. *
+// - NullsFirst: If true, nulls will come before non-null values, otherwise,
+// nulls go at the end.
+type SortingColumn struct {
+ ColumnIdx int32 `thrift:"column_idx,1,required" db:"column_idx" json:"column_idx"`
+ Descending bool `thrift:"descending,2,required" db:"descending" json:"descending"`
+ NullsFirst bool `thrift:"nulls_first,3,required" db:"nulls_first" json:"nulls_first"`
+}
+
+func NewSortingColumn() *SortingColumn {
+ return &SortingColumn{}
+}
+
+
+func (p *SortingColumn) GetColumnIdx() int32 {
+ return p.ColumnIdx
+}
+
+func (p *SortingColumn) GetDescending() bool {
+ return p.Descending
+}
+
+func (p *SortingColumn) GetNullsFirst() bool {
+ return p.NullsFirst
+}
+func (p *SortingColumn) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetColumnIdx bool = false;
+ var issetDescending bool = false;
+ var issetNullsFirst bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetColumnIdx = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetDescending = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetNullsFirst = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetColumnIdx{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColumnIdx is not set"));
+ }
+ if !issetDescending{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Descending is not set"));
+ }
+ if !issetNullsFirst{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NullsFirst is not set"));
+ }
+ return nil
+}
+
+func (p *SortingColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.ColumnIdx = v
+}
+ return nil
+}
+
+func (p *SortingColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Descending = v
+}
+ return nil
+}
+
+func (p *SortingColumn) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.NullsFirst = v
+}
+ return nil
+}
+
+func (p *SortingColumn) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "SortingColumn"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SortingColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "column_idx", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:column_idx: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.ColumnIdx)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.column_idx (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:column_idx: ", p), err) }
+ return err
+}
+
+func (p *SortingColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "descending", thrift.BOOL, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:descending: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Descending)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.descending (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:descending: ", p), err) }
+ return err
+}
+
+func (p *SortingColumn) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "nulls_first", thrift.BOOL, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:nulls_first: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.NullsFirst)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.nulls_first (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:nulls_first: ", p), err) }
+ return err
+}
+
+func (p *SortingColumn) Equals(other *SortingColumn) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.ColumnIdx != other.ColumnIdx { return false }
+ if p.Descending != other.Descending { return false }
+ if p.NullsFirst != other.NullsFirst { return false }
+ return true
+}
+
+func (p *SortingColumn) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SortingColumn(%+v)", *p)
+}
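// Illustrative sketch, not part of the generated file: Write and Read are
// symmetric, so a SortingColumn round-trips through any TProtocol. The
// in-memory transport and compact protocol come from the vendored thrift
// package; Equals (generated above) verifies the result.
func roundTripSortingColumn(ctx context.Context) error {
  in := &SortingColumn{ColumnIdx: 2, Descending: true, NullsFirst: false}
  buf := thrift.NewTMemoryBuffer()
  proto := thrift.NewTCompactProtocolConf(buf, &thrift.TConfiguration{})
  if err := in.Write(ctx, proto); err != nil {
    return err
  }
  out := NewSortingColumn()
  if err := out.Read(ctx, proto); err != nil {
    return err
  }
  if !in.Equals(out) {
    return fmt.Errorf("round trip mismatch: %s vs %s", in, out)
  }
  return nil
}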
+
+// statistics of a given page type and encoding
+//
+// Attributes:
+// - PageType: the page type (data/dic/...) *
+// - Encoding: encoding of the page *
+// - Count: number of pages of this type with this encoding *
+type PageEncodingStats struct {
+ PageType PageType `thrift:"page_type,1,required" db:"page_type" json:"page_type"`
+ Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"`
+ Count int32 `thrift:"count,3,required" db:"count" json:"count"`
+}
+
+func NewPageEncodingStats() *PageEncodingStats {
+ return &PageEncodingStats{}
+}
+
+
+func (p *PageEncodingStats) GetPageType() PageType {
+ return p.PageType
+}
+
+func (p *PageEncodingStats) GetEncoding() Encoding {
+ return p.Encoding
+}
+
+func (p *PageEncodingStats) GetCount() int32 {
+ return p.Count
+}
+func (p *PageEncodingStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetPageType bool = false;
+ var issetEncoding bool = false;
+ var issetCount bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetPageType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetEncoding = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetCount = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetPageType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PageType is not set"));
+ }
+ if !issetEncoding{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set"));
+ }
+ if !issetCount{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Count is not set"));
+ }
+ return nil
+}
+
+func (p *PageEncodingStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ temp := PageType(v)
+ p.PageType = temp
+}
+ return nil
+}
+
+func (p *PageEncodingStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ temp := Encoding(v)
+ p.Encoding = temp
+}
+ return nil
+}
+
+func (p *PageEncodingStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.Count = v
+}
+ return nil
+}
+
+func (p *PageEncodingStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "PageEncodingStats"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *PageEncodingStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "page_type", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:page_type: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.PageType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.page_type (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:page_type: ", p), err) }
+ return err
+}
+
+func (p *PageEncodingStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encoding: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) }
+ return err
+}
+
+func (p *PageEncodingStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "count", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:count: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Count)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.count (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:count: ", p), err) }
+ return err
+}
+
+func (p *PageEncodingStats) Equals(other *PageEncodingStats) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.PageType != other.PageType { return false }
+ if p.Encoding != other.Encoding { return false }
+ if p.Count != other.Count { return false }
+ return true
+}
+
+func (p *PageEncodingStats) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("PageEncodingStats(%+v)", *p)
+}
+
+// Description for column metadata
+//
+// Attributes:
+// - Type: Type of this column *
+// - Encodings: Set of all encodings used for this column. The purpose is to validate
+// whether we can decode those pages. *
+// - PathInSchema: Path in schema *
+// - Codec: Compression codec *
+// - NumValues: Number of values in this column *
+// - TotalUncompressedSize: total byte size of all uncompressed pages in this column chunk (including the headers) *
+// - TotalCompressedSize: total byte size of all compressed, and potentially encrypted, pages
+// in this column chunk (including the headers) *
+// - KeyValueMetadata: Optional key/value metadata *
+// - DataPageOffset: Byte offset from beginning of file to first data page *
+// - IndexPageOffset: Byte offset from beginning of file to root index page *
+// - DictionaryPageOffset: Byte offset from the beginning of file to first (only) dictionary page *
+// - Statistics: optional statistics for this column chunk
+// - EncodingStats: Set of all encodings used for pages in this column chunk.
+// This information can be used to determine if all data pages are
+// dictionary encoded for example *
+// - BloomFilterOffset: Byte offset from beginning of file to Bloom filter data. *
+type ColumnMetaData struct {
+ Type Type `thrift:"type,1,required" db:"type" json:"type"`
+ Encodings []Encoding `thrift:"encodings,2,required" db:"encodings" json:"encodings"`
+ PathInSchema []string `thrift:"path_in_schema,3,required" db:"path_in_schema" json:"path_in_schema"`
+ Codec CompressionCodec `thrift:"codec,4,required" db:"codec" json:"codec"`
+ NumValues int64 `thrift:"num_values,5,required" db:"num_values" json:"num_values"`
+ TotalUncompressedSize int64 `thrift:"total_uncompressed_size,6,required" db:"total_uncompressed_size" json:"total_uncompressed_size"`
+ TotalCompressedSize int64 `thrift:"total_compressed_size,7,required" db:"total_compressed_size" json:"total_compressed_size"`
+ KeyValueMetadata []*KeyValue `thrift:"key_value_metadata,8" db:"key_value_metadata" json:"key_value_metadata,omitempty"`
+ DataPageOffset int64 `thrift:"data_page_offset,9,required" db:"data_page_offset" json:"data_page_offset"`
+ IndexPageOffset *int64 `thrift:"index_page_offset,10" db:"index_page_offset" json:"index_page_offset,omitempty"`
+ DictionaryPageOffset *int64 `thrift:"dictionary_page_offset,11" db:"dictionary_page_offset" json:"dictionary_page_offset,omitempty"`
+ Statistics *Statistics `thrift:"statistics,12" db:"statistics" json:"statistics,omitempty"`
+ EncodingStats []*PageEncodingStats `thrift:"encoding_stats,13" db:"encoding_stats" json:"encoding_stats,omitempty"`
+ BloomFilterOffset *int64 `thrift:"bloom_filter_offset,14" db:"bloom_filter_offset" json:"bloom_filter_offset,omitempty"`
+}
+
+func NewColumnMetaData() *ColumnMetaData {
+ return &ColumnMetaData{}
+}
+
+
+func (p *ColumnMetaData) GetType() Type {
+ return p.Type
+}
+
+func (p *ColumnMetaData) GetEncodings() []Encoding {
+ return p.Encodings
+}
+
+func (p *ColumnMetaData) GetPathInSchema() []string {
+ return p.PathInSchema
+}
+
+func (p *ColumnMetaData) GetCodec() CompressionCodec {
+ return p.Codec
+}
+
+func (p *ColumnMetaData) GetNumValues() int64 {
+ return p.NumValues
+}
+
+func (p *ColumnMetaData) GetTotalUncompressedSize() int64 {
+ return p.TotalUncompressedSize
+}
+
+func (p *ColumnMetaData) GetTotalCompressedSize() int64 {
+ return p.TotalCompressedSize
+}
+var ColumnMetaData_KeyValueMetadata_DEFAULT []*KeyValue
+
+func (p *ColumnMetaData) GetKeyValueMetadata() []*KeyValue {
+ return p.KeyValueMetadata
+}
+
+func (p *ColumnMetaData) GetDataPageOffset() int64 {
+ return p.DataPageOffset
+}
+var ColumnMetaData_IndexPageOffset_DEFAULT int64
+func (p *ColumnMetaData) GetIndexPageOffset() int64 {
+ if !p.IsSetIndexPageOffset() {
+ return ColumnMetaData_IndexPageOffset_DEFAULT
+ }
+return *p.IndexPageOffset
+}
+var ColumnMetaData_DictionaryPageOffset_DEFAULT int64
+func (p *ColumnMetaData) GetDictionaryPageOffset() int64 {
+ if !p.IsSetDictionaryPageOffset() {
+ return ColumnMetaData_DictionaryPageOffset_DEFAULT
+ }
+return *p.DictionaryPageOffset
+}
+var ColumnMetaData_Statistics_DEFAULT *Statistics
+func (p *ColumnMetaData) GetStatistics() *Statistics {
+ if !p.IsSetStatistics() {
+ return ColumnMetaData_Statistics_DEFAULT
+ }
+return p.Statistics
+}
+var ColumnMetaData_EncodingStats_DEFAULT []*PageEncodingStats
+
+func (p *ColumnMetaData) GetEncodingStats() []*PageEncodingStats {
+ return p.EncodingStats
+}
+var ColumnMetaData_BloomFilterOffset_DEFAULT int64
+func (p *ColumnMetaData) GetBloomFilterOffset() int64 {
+ if !p.IsSetBloomFilterOffset() {
+ return ColumnMetaData_BloomFilterOffset_DEFAULT
+ }
+return *p.BloomFilterOffset
+}
+func (p *ColumnMetaData) IsSetKeyValueMetadata() bool {
+ return p.KeyValueMetadata != nil
+}
+
+func (p *ColumnMetaData) IsSetIndexPageOffset() bool {
+ return p.IndexPageOffset != nil
+}
+
+func (p *ColumnMetaData) IsSetDictionaryPageOffset() bool {
+ return p.DictionaryPageOffset != nil
+}
+
+func (p *ColumnMetaData) IsSetStatistics() bool {
+ return p.Statistics != nil
+}
+
+func (p *ColumnMetaData) IsSetEncodingStats() bool {
+ return p.EncodingStats != nil
+}
+
+func (p *ColumnMetaData) IsSetBloomFilterOffset() bool {
+ return p.BloomFilterOffset != nil
+}
+
+func (p *ColumnMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetType bool = false;
+ var issetEncodings bool = false;
+ var issetPathInSchema bool = false;
+ var issetCodec bool = false;
+ var issetNumValues bool = false;
+ var issetTotalUncompressedSize bool = false;
+ var issetTotalCompressedSize bool = false;
+ var issetDataPageOffset bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetEncodings = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetPathInSchema = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ issetCodec = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ issetNumValues = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ issetTotalUncompressedSize = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField7(ctx, iprot); err != nil {
+ return err
+ }
+ issetTotalCompressedSize = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField8(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField9(ctx, iprot); err != nil {
+ return err
+ }
+ issetDataPageOffset = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField10(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField11(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField12(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 13:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField13(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 14:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField14(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set"));
+ }
+ if !issetEncodings{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encodings is not set"));
+ }
+ if !issetPathInSchema{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PathInSchema is not set"));
+ }
+ if !issetCodec{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Codec is not set"));
+ }
+ if !issetNumValues{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set"));
+ }
+ if !issetTotalUncompressedSize{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalUncompressedSize is not set"));
+ }
+ if !issetTotalCompressedSize{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalCompressedSize is not set"));
+ }
+ if !issetDataPageOffset{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DataPageOffset is not set"));
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ temp := Type(v)
+ p.Type = temp
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]Encoding, 0, size)
+ p.Encodings = tSlice
+ for i := 0; i < size; i ++ {
+var _elem0 Encoding
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ temp := Encoding(v)
+ _elem0 = temp
+}
+ p.Encodings = append(p.Encodings, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]string, 0, size)
+ p.PathInSchema = tSlice
+ for i := 0; i < size; i ++ {
+var _elem1 string
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _elem1 = v
+}
+ p.PathInSchema = append(p.PathInSchema, _elem1)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ temp := CompressionCodec(v)
+ p.Codec = temp
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.NumValues = v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+} else {
+ p.TotalUncompressedSize = v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+} else {
+ p.TotalCompressedSize = v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*KeyValue, 0, size)
+ p.KeyValueMetadata = tSlice
+ for i := 0; i < size; i ++ {
+ _elem2 := &KeyValue{}
+ if err := _elem2.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.KeyValueMetadata = append(p.KeyValueMetadata, _elem2)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+} else {
+ p.DataPageOffset = v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 10: ", err)
+} else {
+ p.IndexPageOffset = &v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 11: ", err)
+} else {
+ p.DictionaryPageOffset = &v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Statistics = &Statistics{}
+ if err := p.Statistics.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err)
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField13(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*PageEncodingStats, 0, size)
+ p.EncodingStats = tSlice
+ for i := 0; i < size; i ++ {
+ _elem3 := &PageEncodingStats{}
+ if err := _elem3.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+ }
+ p.EncodingStats = append(p.EncodingStats, _elem3)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ColumnMetaData) ReadField14(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 14: ", err)
+} else {
+ p.BloomFilterOffset = &v
+}
+ return nil
+}
+
+func (p *ColumnMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "ColumnMetaData"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ if err := p.writeField9(ctx, oprot); err != nil { return err }
+ if err := p.writeField10(ctx, oprot); err != nil { return err }
+ if err := p.writeField11(ctx, oprot); err != nil { return err }
+ if err := p.writeField12(ctx, oprot); err != nil { return err }
+ if err := p.writeField13(ctx, oprot); err != nil { return err }
+ if err := p.writeField14(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ColumnMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Type)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "encodings", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encodings: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.I32, len(p.Encodings)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Encodings {
+ if err := oprot.WriteI32(ctx, int32(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encodings: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "path_in_schema", thrift.LIST, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:path_in_schema: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.PathInSchema)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.PathInSchema {
+ if err := oprot.WriteString(ctx, string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:path_in_schema: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "codec", thrift.I32, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:codec: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Codec)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.codec (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:codec: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_values: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.NumValues)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.num_values (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:num_values: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "total_uncompressed_size", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:total_uncompressed_size: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TotalUncompressedSize)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.total_uncompressed_size (6) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:total_uncompressed_size: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "total_compressed_size", thrift.I64, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:total_compressed_size: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TotalCompressedSize)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.total_compressed_size (7) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:total_compressed_size: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetKeyValueMetadata() {
+ if err := oprot.WriteFieldBegin(ctx, "key_value_metadata", thrift.LIST, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:key_value_metadata: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.KeyValueMetadata)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.KeyValueMetadata {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:key_value_metadata: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "data_page_offset", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:data_page_offset: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.DataPageOffset)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.data_page_offset (9) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:data_page_offset: ", p), err) }
+ return err
+}
+
+func (p *ColumnMetaData) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIndexPageOffset() {
+ if err := oprot.WriteFieldBegin(ctx, "index_page_offset", thrift.I64, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:index_page_offset: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.IndexPageOffset)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.index_page_offset (10) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:index_page_offset: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDictionaryPageOffset() {
+ if err := oprot.WriteFieldBegin(ctx, "dictionary_page_offset", thrift.I64, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:dictionary_page_offset: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.DictionaryPageOffset)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.dictionary_page_offset (11) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:dictionary_page_offset: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetStatistics() {
+ if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 12); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:statistics: ", p), err) }
+ if err := p.Statistics.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 12:statistics: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) writeField13(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetEncodingStats() {
+ if err := oprot.WriteFieldBegin(ctx, "encoding_stats", thrift.LIST, 13); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:encoding_stats: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.EncodingStats)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.EncodingStats {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 13:encoding_stats: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) writeField14(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetBloomFilterOffset() {
+ if err := oprot.WriteFieldBegin(ctx, "bloom_filter_offset", thrift.I64, 14); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:bloom_filter_offset: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.BloomFilterOffset)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.bloom_filter_offset (14) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 14:bloom_filter_offset: ", p), err) }
+ }
+ return err
+}
+
+func (p *ColumnMetaData) Equals(other *ColumnMetaData) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Type != other.Type { return false }
+ if len(p.Encodings) != len(other.Encodings) { return false }
+ for i, _tgt := range p.Encodings {
+ _src4 := other.Encodings[i]
+ if _tgt != _src4 { return false }
+ }
+ if len(p.PathInSchema) != len(other.PathInSchema) { return false }
+ for i, _tgt := range p.PathInSchema {
+ _src5 := other.PathInSchema[i]
+ if _tgt != _src5 { return false }
+ }
+ if p.Codec != other.Codec { return false }
+ if p.NumValues != other.NumValues { return false }
+ if p.TotalUncompressedSize != other.TotalUncompressedSize { return false }
+ if p.TotalCompressedSize != other.TotalCompressedSize { return false }
+ if len(p.KeyValueMetadata) != len(other.KeyValueMetadata) { return false }
+ for i, _tgt := range p.KeyValueMetadata {
+ _src6 := other.KeyValueMetadata[i]
+ if !_tgt.Equals(_src6) { return false }
+ }
+ if p.DataPageOffset != other.DataPageOffset { return false }
+ if p.IndexPageOffset != other.IndexPageOffset {
+ if p.IndexPageOffset == nil || other.IndexPageOffset == nil {
+ return false
+ }
+ if (*p.IndexPageOffset) != (*other.IndexPageOffset) { return false }
+ }
+ if p.DictionaryPageOffset != other.DictionaryPageOffset {
+ if p.DictionaryPageOffset == nil || other.DictionaryPageOffset == nil {
+ return false
+ }
+ if (*p.DictionaryPageOffset) != (*other.DictionaryPageOffset) { return false }
+ }
+ if !p.Statistics.Equals(other.Statistics) { return false }
+ if len(p.EncodingStats) != len(other.EncodingStats) { return false }
+ for i, _tgt := range p.EncodingStats {
+ _src7 := other.EncodingStats[i]
+ if !_tgt.Equals(_src7) { return false }
+ }
+ if p.BloomFilterOffset != other.BloomFilterOffset {
+ if p.BloomFilterOffset == nil || other.BloomFilterOffset == nil {
+ return false
+ }
+ if (*p.BloomFilterOffset) != (*other.BloomFilterOffset) { return false }
+ }
+ return true
+}
+
+func (p *ColumnMetaData) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ColumnMetaData(%+v)", *p)
+}
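// Illustrative sketch, not part of the generated file: ColumnMetaData carries
// both compressed and uncompressed byte totals, so a per-column compression
// ratio is a one-liner for anyone inspecting Parquet metadata.
func compressionRatio(md *ColumnMetaData) float64 {
  if md == nil || md.TotalCompressedSize == 0 {
    return 0
  }
  return float64(md.TotalUncompressedSize) / float64(md.TotalCompressedSize)
}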
+
+type EncryptionWithFooterKey struct {
+}
+
+func NewEncryptionWithFooterKey() *EncryptionWithFooterKey {
+ return &EncryptionWithFooterKey{}
+}
+
+func (p *EncryptionWithFooterKey) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *EncryptionWithFooterKey) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "EncryptionWithFooterKey"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *EncryptionWithFooterKey) Equals(other *EncryptionWithFooterKey) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ return true
+}
+
+func (p *EncryptionWithFooterKey) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("EncryptionWithFooterKey(%+v)", *p)
+}
+
+// Attributes:
+// - PathInSchema: Column path in schema *
+// - KeyMetadata: Retrieval metadata of column encryption key *
+type EncryptionWithColumnKey struct {
+ PathInSchema []string `thrift:"path_in_schema,1,required" db:"path_in_schema" json:"path_in_schema"`
+ KeyMetadata []byte `thrift:"key_metadata,2" db:"key_metadata" json:"key_metadata,omitempty"`
+}
+
+func NewEncryptionWithColumnKey() *EncryptionWithColumnKey {
+ return &EncryptionWithColumnKey{}
+}
+
+
+func (p *EncryptionWithColumnKey) GetPathInSchema() []string {
+ return p.PathInSchema
+}
+var EncryptionWithColumnKey_KeyMetadata_DEFAULT []byte
+
+func (p *EncryptionWithColumnKey) GetKeyMetadata() []byte {
+ return p.KeyMetadata
+}
+func (p *EncryptionWithColumnKey) IsSetKeyMetadata() bool {
+ return p.KeyMetadata != nil
+}
+
+func (p *EncryptionWithColumnKey) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetPathInSchema bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetPathInSchema = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetPathInSchema{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PathInSchema is not set"));
+ }
+ return nil
+}
+
+func (p *EncryptionWithColumnKey) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]string, 0, size)
+ p.PathInSchema = tSlice
+ for i := 0; i < size; i ++ {
+var _elem8 string
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _elem8 = v
+}
+ p.PathInSchema = append(p.PathInSchema, _elem8)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *EncryptionWithColumnKey) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.KeyMetadata = v
+}
+ return nil
+}
+
+func (p *EncryptionWithColumnKey) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "EncryptionWithColumnKey"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *EncryptionWithColumnKey) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "path_in_schema", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:path_in_schema: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.PathInSchema)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.PathInSchema {
+ if err := oprot.WriteString(ctx, string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:path_in_schema: ", p), err) }
+ return err
+}
+
+func (p *EncryptionWithColumnKey) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetKeyMetadata() {
+ if err := oprot.WriteFieldBegin(ctx, "key_metadata", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:key_metadata: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.KeyMetadata); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key_metadata (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:key_metadata: ", p), err) }
+ }
+ return err
+}
+
+func (p *EncryptionWithColumnKey) Equals(other *EncryptionWithColumnKey) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if len(p.PathInSchema) != len(other.PathInSchema) { return false }
+ for i, _tgt := range p.PathInSchema {
+ _src9 := other.PathInSchema[i]
+ if _tgt != _src9 { return false }
+ }
+ if bytes.Compare(p.KeyMetadata, other.KeyMetadata) != 0 { return false }
+ return true
+}
+
+func (p *EncryptionWithColumnKey) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("EncryptionWithColumnKey(%+v)", *p)
+}
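// Illustrative sketch, not part of the generated file: ColumnCryptoMetaData
// (defined next) is generated from a Thrift union, so exactly one member may
// be set; its Write enforces this via CountSetFieldsColumnCryptoMetaData.
func exampleColumnCrypto() *ColumnCryptoMetaData {
  c := NewColumnCryptoMetaData()
  c.ENCRYPTION_WITH_FOOTER_KEY = NewEncryptionWithFooterKey()
  // Setting ENCRYPTION_WITH_COLUMN_KEY as well would make Write fail with
  // "exactly one field must be set (2 set)".
  return c
}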
err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ColumnCryptoMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.ENCRYPTION_WITH_FOOTER_KEY = &EncryptionWithFooterKey{} + if err := p.ENCRYPTION_WITH_FOOTER_KEY.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENCRYPTION_WITH_FOOTER_KEY), err) + } + return nil +} + +func (p *ColumnCryptoMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ENCRYPTION_WITH_COLUMN_KEY = &EncryptionWithColumnKey{} + if err := p.ENCRYPTION_WITH_COLUMN_KEY.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENCRYPTION_WITH_COLUMN_KEY), err) + } + return nil +} + +func (p *ColumnCryptoMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsColumnCryptoMetaData(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "ColumnCryptoMetaData"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ColumnCryptoMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetENCRYPTION_WITH_FOOTER_KEY() { + if err := oprot.WriteFieldBegin(ctx, "ENCRYPTION_WITH_FOOTER_KEY", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ENCRYPTION_WITH_FOOTER_KEY: ", p), err) } + if err := p.ENCRYPTION_WITH_FOOTER_KEY.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENCRYPTION_WITH_FOOTER_KEY), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ENCRYPTION_WITH_FOOTER_KEY: ", p), err) } + } + return err +} + +func (p *ColumnCryptoMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetENCRYPTION_WITH_COLUMN_KEY() { + if err := oprot.WriteFieldBegin(ctx, "ENCRYPTION_WITH_COLUMN_KEY", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ENCRYPTION_WITH_COLUMN_KEY: ", p), err) } + if err := p.ENCRYPTION_WITH_COLUMN_KEY.Write(ctx, oprot); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENCRYPTION_WITH_COLUMN_KEY), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ENCRYPTION_WITH_COLUMN_KEY: ", p), err) } + } + return err +} + +func (p *ColumnCryptoMetaData) Equals(other *ColumnCryptoMetaData) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.ENCRYPTION_WITH_FOOTER_KEY.Equals(other.ENCRYPTION_WITH_FOOTER_KEY) { return false } + if !p.ENCRYPTION_WITH_COLUMN_KEY.Equals(other.ENCRYPTION_WITH_COLUMN_KEY) { return false } + return true +} + +func (p *ColumnCryptoMetaData) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ColumnCryptoMetaData(%+v)", *p) +} + +// Attributes: +// - FilePath: File where column data is stored. If not set, assumed to be same file as +// metadata. This path is relative to the current file. +// +// - FileOffset: Byte offset in file_path to the ColumnMetaData * +// - MetaData: Column metadata for this chunk. This is the same content as what is at +// file_path/file_offset. Having it here has it replicated in the file +// metadata. +// +// - OffsetIndexOffset: File offset of ColumnChunk's OffsetIndex * +// - OffsetIndexLength: Size of ColumnChunk's OffsetIndex, in bytes * +// - ColumnIndexOffset: File offset of ColumnChunk's ColumnIndex * +// - ColumnIndexLength: Size of ColumnChunk's ColumnIndex, in bytes * +// - CryptoMetadata: Crypto metadata of encrypted columns * +// - EncryptedColumnMetadata: Encrypted column metadata for this chunk * +type ColumnChunk struct { + FilePath *string `thrift:"file_path,1" db:"file_path" json:"file_path,omitempty"` + FileOffset int64 `thrift:"file_offset,2,required" db:"file_offset" json:"file_offset"` + MetaData *ColumnMetaData `thrift:"meta_data,3" db:"meta_data" json:"meta_data,omitempty"` + OffsetIndexOffset *int64 `thrift:"offset_index_offset,4" db:"offset_index_offset" json:"offset_index_offset,omitempty"` + OffsetIndexLength *int32 `thrift:"offset_index_length,5" db:"offset_index_length" json:"offset_index_length,omitempty"` + ColumnIndexOffset *int64 `thrift:"column_index_offset,6" db:"column_index_offset" json:"column_index_offset,omitempty"` + ColumnIndexLength *int32 `thrift:"column_index_length,7" db:"column_index_length" json:"column_index_length,omitempty"` + CryptoMetadata *ColumnCryptoMetaData `thrift:"crypto_metadata,8" db:"crypto_metadata" json:"crypto_metadata,omitempty"` + EncryptedColumnMetadata []byte `thrift:"encrypted_column_metadata,9" db:"encrypted_column_metadata" json:"encrypted_column_metadata,omitempty"` +} + +func NewColumnChunk() *ColumnChunk { + return &ColumnChunk{} +} + +var ColumnChunk_FilePath_DEFAULT string +func (p *ColumnChunk) GetFilePath() string { + if !p.IsSetFilePath() { + return ColumnChunk_FilePath_DEFAULT + } +return *p.FilePath +} + +func (p *ColumnChunk) GetFileOffset() int64 { + return p.FileOffset +} +var ColumnChunk_MetaData_DEFAULT *ColumnMetaData +func (p *ColumnChunk) GetMetaData() *ColumnMetaData { + if !p.IsSetMetaData() { + return ColumnChunk_MetaData_DEFAULT + } +return p.MetaData +} +var ColumnChunk_OffsetIndexOffset_DEFAULT int64 +func (p *ColumnChunk) GetOffsetIndexOffset() int64 { + if !p.IsSetOffsetIndexOffset() { + return ColumnChunk_OffsetIndexOffset_DEFAULT + } +return *p.OffsetIndexOffset +} +var ColumnChunk_OffsetIndexLength_DEFAULT int32 +func (p *ColumnChunk) GetOffsetIndexLength() int32 { + if !p.IsSetOffsetIndexLength() { + 
return ColumnChunk_OffsetIndexLength_DEFAULT + } +return *p.OffsetIndexLength +} +var ColumnChunk_ColumnIndexOffset_DEFAULT int64 +func (p *ColumnChunk) GetColumnIndexOffset() int64 { + if !p.IsSetColumnIndexOffset() { + return ColumnChunk_ColumnIndexOffset_DEFAULT + } +return *p.ColumnIndexOffset +} +var ColumnChunk_ColumnIndexLength_DEFAULT int32 +func (p *ColumnChunk) GetColumnIndexLength() int32 { + if !p.IsSetColumnIndexLength() { + return ColumnChunk_ColumnIndexLength_DEFAULT + } +return *p.ColumnIndexLength +} +var ColumnChunk_CryptoMetadata_DEFAULT *ColumnCryptoMetaData +func (p *ColumnChunk) GetCryptoMetadata() *ColumnCryptoMetaData { + if !p.IsSetCryptoMetadata() { + return ColumnChunk_CryptoMetadata_DEFAULT + } +return p.CryptoMetadata +} +var ColumnChunk_EncryptedColumnMetadata_DEFAULT []byte + +func (p *ColumnChunk) GetEncryptedColumnMetadata() []byte { + return p.EncryptedColumnMetadata +} +func (p *ColumnChunk) IsSetFilePath() bool { + return p.FilePath != nil +} + +func (p *ColumnChunk) IsSetMetaData() bool { + return p.MetaData != nil +} + +func (p *ColumnChunk) IsSetOffsetIndexOffset() bool { + return p.OffsetIndexOffset != nil +} + +func (p *ColumnChunk) IsSetOffsetIndexLength() bool { + return p.OffsetIndexLength != nil +} + +func (p *ColumnChunk) IsSetColumnIndexOffset() bool { + return p.ColumnIndexOffset != nil +} + +func (p *ColumnChunk) IsSetColumnIndexLength() bool { + return p.ColumnIndexLength != nil +} + +func (p *ColumnChunk) IsSetCryptoMetadata() bool { + return p.CryptoMetadata != nil +} + +func (p *ColumnChunk) IsSetEncryptedColumnMetadata() bool { + return p.EncryptedColumnMetadata != nil +} + +func (p *ColumnChunk) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetFileOffset bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetFileOffset = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + 
} + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetFileOffset{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FileOffset is not set")); + } + return nil +} + +func (p *ColumnChunk) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.FilePath = &v +} + return nil +} + +func (p *ColumnChunk) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.FileOffset = v +} + return nil +} + +func (p *ColumnChunk) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.MetaData = &ColumnMetaData{} + if err := p.MetaData.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetaData), err) + } + return nil +} + +func (p *ColumnChunk) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.OffsetIndexOffset = &v +} + return nil +} + +func (p *ColumnChunk) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.OffsetIndexLength = &v +} + return nil +} + +func (p *ColumnChunk) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.ColumnIndexOffset = &v +} + return nil +} + +func (p *ColumnChunk) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.ColumnIndexLength = &v +} + return nil +} + +func (p *ColumnChunk) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + p.CryptoMetadata = &ColumnCryptoMetaData{} + if err := p.CryptoMetadata.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CryptoMetadata), err) + } + return nil +} + +func (p *ColumnChunk) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.EncryptedColumnMetadata = v +} + return nil +} + +func (p *ColumnChunk) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ColumnChunk"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if 
err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ColumnChunk) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetFilePath() { + if err := oprot.WriteFieldBegin(ctx, "file_path", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:file_path: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.FilePath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_path (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:file_path: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "file_offset", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:file_offset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.FileOffset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_offset (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:file_offset: ", p), err) } + return err +} + +func (p *ColumnChunk) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetMetaData() { + if err := oprot.WriteFieldBegin(ctx, "meta_data", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:meta_data: ", p), err) } + if err := p.MetaData.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MetaData), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:meta_data: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOffsetIndexOffset() { + if err := oprot.WriteFieldBegin(ctx, "offset_index_offset", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:offset_index_offset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.OffsetIndexOffset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.offset_index_offset (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:offset_index_offset: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOffsetIndexLength() { + if err := oprot.WriteFieldBegin(ctx, "offset_index_length", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 5:offset_index_length: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.OffsetIndexLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.offset_index_length (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:offset_index_length: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetColumnIndexOffset() { + if err := oprot.WriteFieldBegin(ctx, "column_index_offset", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:column_index_offset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.ColumnIndexOffset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.column_index_offset (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:column_index_offset: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetColumnIndexLength() { + if err := oprot.WriteFieldBegin(ctx, "column_index_length", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:column_index_length: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.ColumnIndexLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.column_index_length (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:column_index_length: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCryptoMetadata() { + if err := oprot.WriteFieldBegin(ctx, "crypto_metadata", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:crypto_metadata: ", p), err) } + if err := p.CryptoMetadata.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CryptoMetadata), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:crypto_metadata: ", p), err) } + } + return err +} + +func (p *ColumnChunk) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetEncryptedColumnMetadata() { + if err := oprot.WriteFieldBegin(ctx, "encrypted_column_metadata", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:encrypted_column_metadata: ", p), err) } + if err := oprot.WriteBinary(ctx, p.EncryptedColumnMetadata); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.encrypted_column_metadata (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:encrypted_column_metadata: ", p), err) } + } + return err +} + +func (p *ColumnChunk) Equals(other *ColumnChunk) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.FilePath != other.FilePath { + if p.FilePath == nil || other.FilePath == nil { + return false + } + if (*p.FilePath) != (*other.FilePath) { return false } + } + if p.FileOffset != other.FileOffset { return false } + if !p.MetaData.Equals(other.MetaData) { return 
false } + if p.OffsetIndexOffset != other.OffsetIndexOffset { + if p.OffsetIndexOffset == nil || other.OffsetIndexOffset == nil { + return false + } + if (*p.OffsetIndexOffset) != (*other.OffsetIndexOffset) { return false } + } + if p.OffsetIndexLength != other.OffsetIndexLength { + if p.OffsetIndexLength == nil || other.OffsetIndexLength == nil { + return false + } + if (*p.OffsetIndexLength) != (*other.OffsetIndexLength) { return false } + } + if p.ColumnIndexOffset != other.ColumnIndexOffset { + if p.ColumnIndexOffset == nil || other.ColumnIndexOffset == nil { + return false + } + if (*p.ColumnIndexOffset) != (*other.ColumnIndexOffset) { return false } + } + if p.ColumnIndexLength != other.ColumnIndexLength { + if p.ColumnIndexLength == nil || other.ColumnIndexLength == nil { + return false + } + if (*p.ColumnIndexLength) != (*other.ColumnIndexLength) { return false } + } + if !p.CryptoMetadata.Equals(other.CryptoMetadata) { return false } + if bytes.Compare(p.EncryptedColumnMetadata, other.EncryptedColumnMetadata) != 0 { return false } + return true +} + +func (p *ColumnChunk) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ColumnChunk(%+v)", *p) +} + +// Attributes: +// - Columns: Metadata for each column chunk in this row group. +// This list must have the same order as the SchemaElement list in FileMetaData. +// +// - TotalByteSize: Total byte size of all the uncompressed column data in this row group * +// - NumRows: Number of rows in this row group * +// - SortingColumns: If set, specifies a sort ordering of the rows in this RowGroup. +// The sorting columns can be a subset of all the columns. +// - FileOffset: Byte offset from beginning of file to first page (data or dictionary) +// in this row group * +// - TotalCompressedSize: Total byte size of all compressed (and potentially encrypted) column data +// in this row group * +// - Ordinal: Row group ordinal in the file * +type RowGroup struct { + Columns []*ColumnChunk `thrift:"columns,1,required" db:"columns" json:"columns"` + TotalByteSize int64 `thrift:"total_byte_size,2,required" db:"total_byte_size" json:"total_byte_size"` + NumRows int64 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` + SortingColumns []*SortingColumn `thrift:"sorting_columns,4" db:"sorting_columns" json:"sorting_columns,omitempty"` + FileOffset *int64 `thrift:"file_offset,5" db:"file_offset" json:"file_offset,omitempty"` + TotalCompressedSize *int64 `thrift:"total_compressed_size,6" db:"total_compressed_size" json:"total_compressed_size,omitempty"` + Ordinal *int16 `thrift:"ordinal,7" db:"ordinal" json:"ordinal,omitempty"` +} + +func NewRowGroup() *RowGroup { + return &RowGroup{} +} + + +func (p *RowGroup) GetColumns() []*ColumnChunk { + return p.Columns +} + +func (p *RowGroup) GetTotalByteSize() int64 { + return p.TotalByteSize +} + +func (p *RowGroup) GetNumRows() int64 { + return p.NumRows +} +var RowGroup_SortingColumns_DEFAULT []*SortingColumn + +func (p *RowGroup) GetSortingColumns() []*SortingColumn { + return p.SortingColumns +} +var RowGroup_FileOffset_DEFAULT int64 +func (p *RowGroup) GetFileOffset() int64 { + if !p.IsSetFileOffset() { + return RowGroup_FileOffset_DEFAULT + } +return *p.FileOffset +} +var RowGroup_TotalCompressedSize_DEFAULT int64 +func (p *RowGroup) GetTotalCompressedSize() int64 { + if !p.IsSetTotalCompressedSize() { + return RowGroup_TotalCompressedSize_DEFAULT + } +return *p.TotalCompressedSize +} +var RowGroup_Ordinal_DEFAULT int16 +func (p *RowGroup) GetOrdinal() int16 { + if 
!p.IsSetOrdinal() { + return RowGroup_Ordinal_DEFAULT + } +return *p.Ordinal +} +func (p *RowGroup) IsSetSortingColumns() bool { + return p.SortingColumns != nil +} + +func (p *RowGroup) IsSetFileOffset() bool { + return p.FileOffset != nil +} + +func (p *RowGroup) IsSetTotalCompressedSize() bool { + return p.TotalCompressedSize != nil +} + +func (p *RowGroup) IsSetOrdinal() bool { + return p.Ordinal != nil +} + +func (p *RowGroup) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetColumns bool = false; + var issetTotalByteSize bool = false; + var issetNumRows bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetColumns = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTotalByteSize = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetNumRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I16 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetColumns{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Columns is not set")); + } + if !issetTotalByteSize{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalByteSize is not set")); + } + if !issetNumRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); + } + return nil +} + +func (p *RowGroup) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ColumnChunk, 0, size) + p.Columns = tSlice + for i := 0; i < size; i ++ { + _elem10 := &ColumnChunk{} + if err := _elem10.Read(ctx, iprot); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Columns = append(p.Columns, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *RowGroup) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.TotalByteSize = v +} + return nil +} + +func (p *RowGroup) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.NumRows = v +} + return nil +} + +func (p *RowGroup) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SortingColumn, 0, size) + p.SortingColumns = tSlice + for i := 0; i < size; i ++ { + _elem11 := &SortingColumn{} + if err := _elem11.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) + } + p.SortingColumns = append(p.SortingColumns, _elem11) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *RowGroup) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.FileOffset = &v +} + return nil +} + +func (p *RowGroup) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.TotalCompressedSize = &v +} + return nil +} + +func (p *RowGroup) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.Ordinal = &v +} + return nil +} + +func (p *RowGroup) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "RowGroup"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *RowGroup) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "columns", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columns: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Columns)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + 
for _, v := range p.Columns { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columns: ", p), err) } + return err +} + +func (p *RowGroup) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "total_byte_size", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_byte_size: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TotalByteSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_byte_size (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_byte_size: ", p), err) } + return err +} + +func (p *RowGroup) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.NumRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) } + return err +} + +func (p *RowGroup) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSortingColumns() { + if err := oprot.WriteFieldBegin(ctx, "sorting_columns", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:sorting_columns: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.SortingColumns)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.SortingColumns { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:sorting_columns: ", p), err) } + } + return err +} + +func (p *RowGroup) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetFileOffset() { + if err := oprot.WriteFieldBegin(ctx, "file_offset", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:file_offset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.FileOffset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_offset (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:file_offset: ", p), err) } + } + return err +} + +func (p *RowGroup) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTotalCompressedSize() { + if err := oprot.WriteFieldBegin(ctx, "total_compressed_size", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:total_compressed_size: ", p), err) } + if err := 
oprot.WriteI64(ctx, int64(*p.TotalCompressedSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_compressed_size (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:total_compressed_size: ", p), err) } + } + return err +} + +func (p *RowGroup) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOrdinal() { + if err := oprot.WriteFieldBegin(ctx, "ordinal", thrift.I16, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:ordinal: ", p), err) } + if err := oprot.WriteI16(ctx, int16(*p.Ordinal)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ordinal (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:ordinal: ", p), err) } + } + return err +} + +func (p *RowGroup) Equals(other *RowGroup) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Columns) != len(other.Columns) { return false } + for i, _tgt := range p.Columns { + _src12 := other.Columns[i] + if !_tgt.Equals(_src12) { return false } + } + if p.TotalByteSize != other.TotalByteSize { return false } + if p.NumRows != other.NumRows { return false } + if len(p.SortingColumns) != len(other.SortingColumns) { return false } + for i, _tgt := range p.SortingColumns { + _src13 := other.SortingColumns[i] + if !_tgt.Equals(_src13) { return false } + } + if p.FileOffset != other.FileOffset { + if p.FileOffset == nil || other.FileOffset == nil { + return false + } + if (*p.FileOffset) != (*other.FileOffset) { return false } + } + if p.TotalCompressedSize != other.TotalCompressedSize { + if p.TotalCompressedSize == nil || other.TotalCompressedSize == nil { + return false + } + if (*p.TotalCompressedSize) != (*other.TotalCompressedSize) { return false } + } + if p.Ordinal != other.Ordinal { + if p.Ordinal == nil || other.Ordinal == nil { + return false + } + if (*p.Ordinal) != (*other.Ordinal) { return false } + } + return true +} + +func (p *RowGroup) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RowGroup(%+v)", *p) +} + +// Empty struct to signal the order defined by the physical or logical type +type TypeDefinedOrder struct { +} + +func NewTypeDefinedOrder() *TypeDefinedOrder { + return &TypeDefinedOrder{} +} + +func (p *TypeDefinedOrder) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TypeDefinedOrder) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TypeDefinedOrder"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return 
thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TypeDefinedOrder) Equals(other *TypeDefinedOrder) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + return true +} + +func (p *TypeDefinedOrder) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TypeDefinedOrder(%+v)", *p) +} + +// Union to specify the order used for the min_value and max_value fields for a +// column. This union takes the role of an enhanced enum that allows rich +// elements (which will be needed for a collation-based ordering in the future). +// +// Possible values are: +// * TypeDefinedOrder - the column uses the order defined by its logical or +// physical type (if there is no logical type). +// +// If the reader does not support the value of this union, min and max stats +// for this column should be ignored. +// +// Attributes: +// - TYPE_ORDER: The sort orders for logical types are: +// UTF8 - unsigned byte-wise comparison +// INT8 - signed comparison +// INT16 - signed comparison +// INT32 - signed comparison +// INT64 - signed comparison +// UINT8 - unsigned comparison +// UINT16 - unsigned comparison +// UINT32 - unsigned comparison +// UINT64 - unsigned comparison +// DECIMAL - signed comparison of the represented value +// DATE - signed comparison +// TIME_MILLIS - signed comparison +// TIME_MICROS - signed comparison +// TIMESTAMP_MILLIS - signed comparison +// TIMESTAMP_MICROS - signed comparison +// INTERVAL - unsigned comparison +// JSON - unsigned byte-wise comparison +// BSON - unsigned byte-wise comparison +// ENUM - unsigned byte-wise comparison +// LIST - undefined +// MAP - undefined +// +// In the absence of logical types, the sort order is determined by the physical type: +// BOOLEAN - false, true +// INT32 - signed comparison +// INT64 - signed comparison +// INT96 (only used for legacy timestamps) - undefined +// FLOAT - signed comparison of the represented value (*) +// DOUBLE - signed comparison of the represented value (*) +// BYTE_ARRAY - unsigned byte-wise comparison +// FIXED_LEN_BYTE_ARRAY - unsigned byte-wise comparison +// +// (*) Because the sorting order is not specified properly for floating +// point values (relations vs. total ordering) the following +// compatibility rules should be applied when reading statistics: +// - If the min is a NaN, it should be ignored. +// - If the max is a NaN, it should be ignored. +// - If the min is +0, the row group may contain -0 values as well. +// - If the max is -0, the row group may contain +0 values as well. +// - When looking for NaN values, min and max should be ignored. 
+type ColumnOrder struct { + TYPE_ORDER *TypeDefinedOrder `thrift:"TYPE_ORDER,1" db:"TYPE_ORDER" json:"TYPE_ORDER,omitempty"` +} + +func NewColumnOrder() *ColumnOrder { + return &ColumnOrder{} +} + +var ColumnOrder_TYPE_ORDER_DEFAULT *TypeDefinedOrder +func (p *ColumnOrder) GetTYPE_ORDER() *TypeDefinedOrder { + if !p.IsSetTYPE_ORDER() { + return ColumnOrder_TYPE_ORDER_DEFAULT + } +return p.TYPE_ORDER +} +func (p *ColumnOrder) CountSetFieldsColumnOrder() int { + count := 0 + if (p.IsSetTYPE_ORDER()) { + count++ + } + return count + +} + +func (p *ColumnOrder) IsSetTYPE_ORDER() bool { + return p.TYPE_ORDER != nil +} + +func (p *ColumnOrder) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ColumnOrder) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.TYPE_ORDER = &TypeDefinedOrder{} + if err := p.TYPE_ORDER.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TYPE_ORDER), err) + } + return nil +} + +func (p *ColumnOrder) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsColumnOrder(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "ColumnOrder"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ColumnOrder) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTYPE_ORDER() { + if err := oprot.WriteFieldBegin(ctx, "TYPE_ORDER", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:TYPE_ORDER: ", p), err) } + if err := p.TYPE_ORDER.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TYPE_ORDER), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:TYPE_ORDER: ", p), err) } + } + return err +} + +func (p *ColumnOrder) Equals(other *ColumnOrder) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.TYPE_ORDER.Equals(other.TYPE_ORDER) { return false } + return true +} + +func (p *ColumnOrder) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ColumnOrder(%+v)", 
*p) +} + +// Attributes: +// - Offset: Offset of the page in the file * +// - CompressedPageSize: Size of the page, including header. Sum of compressed_page_size and header +// length +// - FirstRowIndex: Index within the RowGroup of the first row of the page; this means pages +// change on record boundaries (r = 0). +type PageLocation struct { + Offset int64 `thrift:"offset,1,required" db:"offset" json:"offset"` + CompressedPageSize int32 `thrift:"compressed_page_size,2,required" db:"compressed_page_size" json:"compressed_page_size"` + FirstRowIndex int64 `thrift:"first_row_index,3,required" db:"first_row_index" json:"first_row_index"` +} + +func NewPageLocation() *PageLocation { + return &PageLocation{} +} + + +func (p *PageLocation) GetOffset() int64 { + return p.Offset +} + +func (p *PageLocation) GetCompressedPageSize() int32 { + return p.CompressedPageSize +} + +func (p *PageLocation) GetFirstRowIndex() int64 { + return p.FirstRowIndex +} +func (p *PageLocation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOffset bool = false; + var issetCompressedPageSize bool = false; + var issetFirstRowIndex bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOffset = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetCompressedPageSize = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetFirstRowIndex = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOffset{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Offset is not set")); + } + if !issetCompressedPageSize{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field CompressedPageSize is not set")); + } + if !issetFirstRowIndex{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FirstRowIndex is not set")); + } + return nil +} + +func (p *PageLocation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Offset = v +} + return nil +} + +func (p *PageLocation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.CompressedPageSize = v +} + return nil +} + +func (p *PageLocation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error 
{ + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.FirstRowIndex = v +} + return nil +} + +func (p *PageLocation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "PageLocation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *PageLocation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "offset", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:offset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.Offset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.offset (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:offset: ", p), err) } + return err +} + +func (p *PageLocation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "compressed_page_size", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:compressed_page_size: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.CompressedPageSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:compressed_page_size: ", p), err) } + return err +} + +func (p *PageLocation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "first_row_index", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:first_row_index: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.FirstRowIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.first_row_index (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:first_row_index: ", p), err) } + return err +} + +func (p *PageLocation) Equals(other *PageLocation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Offset != other.Offset { return false } + if p.CompressedPageSize != other.CompressedPageSize { return false } + if p.FirstRowIndex != other.FirstRowIndex { return false } + return true +} + +func (p *PageLocation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PageLocation(%+v)", *p) +} + +// Attributes: +// - PageLocations: PageLocations, ordered by increasing PageLocation.offset. It is required +// that page_locations[i].first_row_index < page_locations[i+1].first_row_index. 
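Because page_locations must be ordered with strictly increasing first_row_index, a reader can locate the page covering a given row with a binary search instead of a linear scan. A minimal sketch against the generated types below, using the standard library's sort package; pageForRow is an illustrative helper, not part of the generated code:

// pageForRow returns the PageLocation whose page contains rowIdx, or nil if
// rowIdx precedes the first page. It relies on the required ordering
// page_locations[i].first_row_index < page_locations[i+1].first_row_index.
func pageForRow(idx *OffsetIndex, rowIdx int64) *PageLocation {
  locs := idx.GetPageLocations()
  // Find the first page starting strictly after rowIdx, then step back one:
  // that page starts at or before rowIdx.
  n := sort.Search(len(locs), func(i int) bool {
    return locs[i].GetFirstRowIndex() > rowIdx
  })
  if n == 0 {
    return nil
  }
  return locs[n-1]
}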
+type OffsetIndex struct { + PageLocations []*PageLocation `thrift:"page_locations,1,required" db:"page_locations" json:"page_locations"` +} + +func NewOffsetIndex() *OffsetIndex { + return &OffsetIndex{} +} + + +func (p *OffsetIndex) GetPageLocations() []*PageLocation { + return p.PageLocations +} +func (p *OffsetIndex) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetPageLocations bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetPageLocations = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetPageLocations{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PageLocations is not set")); + } + return nil +} + +func (p *OffsetIndex) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*PageLocation, 0, size) + p.PageLocations = tSlice + for i := 0; i < size; i ++ { + _elem14 := &PageLocation{} + if err := _elem14.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem14), err) + } + p.PageLocations = append(p.PageLocations, _elem14) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *OffsetIndex) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "OffsetIndex"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *OffsetIndex) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "page_locations", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:page_locations: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PageLocations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PageLocations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field end error 1:page_locations: ", p), err) }
+  return err
+}
+
+func (p *OffsetIndex) Equals(other *OffsetIndex) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if len(p.PageLocations) != len(other.PageLocations) { return false }
+  for i, _tgt := range p.PageLocations {
+    _src15 := other.PageLocations[i]
+    if !_tgt.Equals(_src15) { return false }
+  }
+  return true
+}
+
+func (p *OffsetIndex) String() string {
+  if p == nil {
+    return ""
+  }
+  return fmt.Sprintf("OffsetIndex(%+v)", *p)
+}
+
+// Description for ColumnIndex.
+// Each [i] refers to the page at OffsetIndex.page_locations[i]
+//
+// Attributes:
+// - NullPages: A list of Boolean values to determine the validity of the corresponding
+// min and max values. If true, a page contains only null values, and writers
+// have to set the corresponding entries in min_values and max_values to
+// byte[0], so that all lists have the same length. If false, the
+// corresponding entries in min_values and max_values must be valid.
+// - MinValues: Two lists containing lower and upper bounds for the values of each page.
+// These may be the actual minimum and maximum values found on a page, but
+// can also be (more compact) values that do not exist on a page. For
+// example, instead of storing "Blart Versenwald III", a writer may set
+// min_values[i]="B", max_values[i]="C". Such more compact values must still
+// be valid values within the column's logical type. Readers must make sure
+// that list entries are populated before using them by inspecting null_pages.
+// - MaxValues
+// - BoundaryOrder: Stores whether both min_values and max_values are ordered and if so, in
+// which direction. This allows readers to perform binary searches in both
+// lists. Readers cannot assume that max_values[i] <= min_values[i+1], even
+// if the lists are ordered.
+// - NullCounts: A list containing the number of null values for each page * +type ColumnIndex struct { + NullPages []bool `thrift:"null_pages,1,required" db:"null_pages" json:"null_pages"` + MinValues [][]byte `thrift:"min_values,2,required" db:"min_values" json:"min_values"` + MaxValues [][]byte `thrift:"max_values,3,required" db:"max_values" json:"max_values"` + BoundaryOrder BoundaryOrder `thrift:"boundary_order,4,required" db:"boundary_order" json:"boundary_order"` + NullCounts []int64 `thrift:"null_counts,5" db:"null_counts" json:"null_counts,omitempty"` +} + +func NewColumnIndex() *ColumnIndex { + return &ColumnIndex{} +} + + +func (p *ColumnIndex) GetNullPages() []bool { + return p.NullPages +} + +func (p *ColumnIndex) GetMinValues() [][]byte { + return p.MinValues +} + +func (p *ColumnIndex) GetMaxValues() [][]byte { + return p.MaxValues +} + +func (p *ColumnIndex) GetBoundaryOrder() BoundaryOrder { + return p.BoundaryOrder +} +var ColumnIndex_NullCounts_DEFAULT []int64 + +func (p *ColumnIndex) GetNullCounts() []int64 { + return p.NullCounts +} +func (p *ColumnIndex) IsSetNullCounts() bool { + return p.NullCounts != nil +} + +func (p *ColumnIndex) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNullPages bool = false; + var issetMinValues bool = false; + var issetMaxValues bool = false; + var issetBoundaryOrder bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNullPages = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetMinValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetMaxValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetBoundaryOrder = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNullPages{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NullPages is not set")); + } + if !issetMinValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MinValues is not set")); + } + if !issetMaxValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValues 
is not set")); + } + if !issetBoundaryOrder{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BoundaryOrder is not set")); + } + return nil +} + +func (p *ColumnIndex) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]bool, 0, size) + p.NullPages = tSlice + for i := 0; i < size; i ++ { +var _elem16 bool + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem16 = v +} + p.NullPages = append(p.NullPages, _elem16) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ColumnIndex) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]byte, 0, size) + p.MinValues = tSlice + for i := 0; i < size; i ++ { +var _elem17 []byte + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem17 = v +} + p.MinValues = append(p.MinValues, _elem17) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ColumnIndex) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]byte, 0, size) + p.MaxValues = tSlice + for i := 0; i < size; i ++ { +var _elem18 []byte + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem18 = v +} + p.MaxValues = append(p.MaxValues, _elem18) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ColumnIndex) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := BoundaryOrder(v) + p.BoundaryOrder = temp +} + return nil +} + +func (p *ColumnIndex) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int64, 0, size) + p.NullCounts = tSlice + for i := 0; i < size; i ++ { +var _elem19 int64 + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem19 = v +} + p.NullCounts = append(p.NullCounts, _elem19) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ColumnIndex) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ColumnIndex"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return 
err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ColumnIndex) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "null_pages", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:null_pages: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.BOOL, len(p.NullPages)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.NullPages { + if err := oprot.WriteBool(ctx, bool(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:null_pages: ", p), err) } + return err +} + +func (p *ColumnIndex) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "min_values", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:min_values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.MinValues)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.MinValues { + if err := oprot.WriteBinary(ctx, v); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:min_values: ", p), err) } + return err +} + +func (p *ColumnIndex) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "max_values", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.MaxValues)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.MaxValues { + if err := oprot.WriteBinary(ctx, v); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_values: ", p), err) }
+  return err
+}
+
+func (p *ColumnIndex) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "boundary_order", thrift.I32, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:boundary_order: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.BoundaryOrder)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.boundary_order (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:boundary_order: ", p), err) }
+  return err
+}
+
+func (p *ColumnIndex) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetNullCounts() {
+    if err := oprot.WriteFieldBegin(ctx, "null_counts", thrift.LIST, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:null_counts: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.I64, len(p.NullCounts)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.NullCounts {
+      if err := oprot.WriteI64(ctx, int64(v)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:null_counts: ", p), err) }
+  }
+  return err
+}
+
+func (p *ColumnIndex) Equals(other *ColumnIndex) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if len(p.NullPages) != len(other.NullPages) { return false }
+  for i, _tgt := range p.NullPages {
+    _src20 := other.NullPages[i]
+    if _tgt != _src20 { return false }
+  }
+  if len(p.MinValues) != len(other.MinValues) { return false }
+  for i, _tgt := range p.MinValues {
+    _src21 := other.MinValues[i]
+    if bytes.Compare(_tgt, _src21) != 0 { return false }
+  }
+  if len(p.MaxValues) != len(other.MaxValues) { return false }
+  for i, _tgt := range p.MaxValues {
+    _src22 := other.MaxValues[i]
+    if bytes.Compare(_tgt, _src22) != 0 { return false }
+  }
+  if p.BoundaryOrder != other.BoundaryOrder { return false }
+  if len(p.NullCounts) != len(other.NullCounts) { return false }
+  for i, _tgt := range p.NullCounts {
+    _src23 := other.NullCounts[i]
+    if _tgt != _src23 { return false }
+  }
+  return true
+}
+
+func (p *ColumnIndex) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ColumnIndex(%+v)", *p)
+}
+
+// Attributes:
+// - AadPrefix: AAD prefix *
+// - AadFileUnique: Unique file identifier part of AAD suffix *
+// - SupplyAadPrefix: In files encrypted with AAD prefix without storing it,
+// readers must supply the prefix *
+type AesGcmV1 struct {
+  AadPrefix []byte `thrift:"aad_prefix,1" db:"aad_prefix" json:"aad_prefix,omitempty"`
+  AadFileUnique []byte `thrift:"aad_file_unique,2" db:"aad_file_unique" json:"aad_file_unique,omitempty"`
+  SupplyAadPrefix *bool `thrift:"supply_aad_prefix,3" db:"supply_aad_prefix" json:"supply_aad_prefix,omitempty"`
+}
+
+func NewAesGcmV1() *AesGcmV1 {
+  return &AesGcmV1{}
+}
+
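+// Note: per the Parquet modular encryption specification (not restated in this
+// generated file), the AAD used for each encrypted module is the concatenation
+// of the optional aad_prefix with an AAD suffix derived from aad_file_unique
+// plus the module type and its row group / column / page ordinals;
+// supply_aad_prefix marks files whose prefix was deliberately not stored and
+// must be provided by the reader.
+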
+var AesGcmV1_AadPrefix_DEFAULT []byte + +func (p *AesGcmV1) GetAadPrefix() []byte { + return p.AadPrefix +} +var AesGcmV1_AadFileUnique_DEFAULT []byte + +func (p *AesGcmV1) GetAadFileUnique() []byte { + return p.AadFileUnique +} +var AesGcmV1_SupplyAadPrefix_DEFAULT bool +func (p *AesGcmV1) GetSupplyAadPrefix() bool { + if !p.IsSetSupplyAadPrefix() { + return AesGcmV1_SupplyAadPrefix_DEFAULT + } +return *p.SupplyAadPrefix +} +func (p *AesGcmV1) IsSetAadPrefix() bool { + return p.AadPrefix != nil +} + +func (p *AesGcmV1) IsSetAadFileUnique() bool { + return p.AadFileUnique != nil +} + +func (p *AesGcmV1) IsSetSupplyAadPrefix() bool { + return p.SupplyAadPrefix != nil +} + +func (p *AesGcmV1) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AesGcmV1) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.AadPrefix = v +} + return nil +} + +func (p *AesGcmV1) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.AadFileUnique = v +} + return nil +} + +func (p *AesGcmV1) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SupplyAadPrefix = &v +} + return nil +} + +func (p *AesGcmV1) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "AesGcmV1"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AesGcmV1) writeField1(ctx context.Context, oprot 
thrift.TProtocol) (err error) {
+  if p.IsSetAadPrefix() {
+    if err := oprot.WriteFieldBegin(ctx, "aad_prefix", thrift.STRING, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:aad_prefix: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.AadPrefix); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.aad_prefix (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:aad_prefix: ", p), err) }
+  }
+  return err
+}
+
+func (p *AesGcmV1) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetAadFileUnique() {
+    if err := oprot.WriteFieldBegin(ctx, "aad_file_unique", thrift.STRING, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:aad_file_unique: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.AadFileUnique); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.aad_file_unique (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:aad_file_unique: ", p), err) }
+  }
+  return err
+}
+
+func (p *AesGcmV1) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSupplyAadPrefix() {
+    if err := oprot.WriteFieldBegin(ctx, "supply_aad_prefix", thrift.BOOL, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:supply_aad_prefix: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.SupplyAadPrefix)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.supply_aad_prefix (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:supply_aad_prefix: ", p), err) }
+  }
+  return err
+}
+
+func (p *AesGcmV1) Equals(other *AesGcmV1) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if bytes.Compare(p.AadPrefix, other.AadPrefix) != 0 { return false }
+  if bytes.Compare(p.AadFileUnique, other.AadFileUnique) != 0 { return false }
+  if p.SupplyAadPrefix != other.SupplyAadPrefix {
+    if p.SupplyAadPrefix == nil || other.SupplyAadPrefix == nil {
+      return false
+    }
+    if (*p.SupplyAadPrefix) != (*other.SupplyAadPrefix) { return false }
+  }
+  return true
+}
+
+func (p *AesGcmV1) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("AesGcmV1(%+v)", *p)
+}
+
+// Attributes:
+// - AadPrefix: AAD prefix *
+// - AadFileUnique: Unique file identifier part of AAD suffix *
+// - SupplyAadPrefix: In files encrypted with AAD prefix without storing it,
+// readers must supply the prefix *
+type AesGcmCtrV1 struct {
+  AadPrefix []byte `thrift:"aad_prefix,1" db:"aad_prefix" json:"aad_prefix,omitempty"`
+  AadFileUnique []byte `thrift:"aad_file_unique,2" db:"aad_file_unique" json:"aad_file_unique,omitempty"`
+  SupplyAadPrefix *bool `thrift:"supply_aad_prefix,3" db:"supply_aad_prefix" json:"supply_aad_prefix,omitempty"`
+}
+
+func NewAesGcmCtrV1() *AesGcmCtrV1 {
+  return &AesGcmCtrV1{}
+}
+
+var AesGcmCtrV1_AadPrefix_DEFAULT []byte
+
+func (p *AesGcmCtrV1) GetAadPrefix() []byte {
+  return p.AadPrefix
+}
+var AesGcmCtrV1_AadFileUnique_DEFAULT []byte
+
+func (p *AesGcmCtrV1) GetAadFileUnique() []byte {
+  return p.AadFileUnique
+}
+var AesGcmCtrV1_SupplyAadPrefix_DEFAULT bool
+func (p *AesGcmCtrV1) GetSupplyAadPrefix() bool {
+  if !p.IsSetSupplyAadPrefix() {
+    return
AesGcmCtrV1_SupplyAadPrefix_DEFAULT + } +return *p.SupplyAadPrefix +} +func (p *AesGcmCtrV1) IsSetAadPrefix() bool { + return p.AadPrefix != nil +} + +func (p *AesGcmCtrV1) IsSetAadFileUnique() bool { + return p.AadFileUnique != nil +} + +func (p *AesGcmCtrV1) IsSetSupplyAadPrefix() bool { + return p.SupplyAadPrefix != nil +} + +func (p *AesGcmCtrV1) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AesGcmCtrV1) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.AadPrefix = v +} + return nil +} + +func (p *AesGcmCtrV1) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.AadFileUnique = v +} + return nil +} + +func (p *AesGcmCtrV1) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SupplyAadPrefix = &v +} + return nil +} + +func (p *AesGcmCtrV1) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "AesGcmCtrV1"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AesGcmCtrV1) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetAadPrefix() { + if err := oprot.WriteFieldBegin(ctx, "aad_prefix", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:aad_prefix: ", p), err) } + if err := oprot.WriteBinary(ctx, p.AadPrefix); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.aad_prefix (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:aad_prefix: ", p), err) } + } + return err +} + +func (p *AesGcmCtrV1) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetAadFileUnique() { + if err := oprot.WriteFieldBegin(ctx, "aad_file_unique", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:aad_file_unique: ", p), err) } + if err := oprot.WriteBinary(ctx, p.AadFileUnique); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.aad_file_unique (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:aad_file_unique: ", p), err) } + } + return err +} + +func (p *AesGcmCtrV1) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSupplyAadPrefix() { + if err := oprot.WriteFieldBegin(ctx, "supply_aad_prefix", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:supply_aad_prefix: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.SupplyAadPrefix)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.supply_aad_prefix (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:supply_aad_prefix: ", p), err) } + } + return err +} + +func (p *AesGcmCtrV1) Equals(other *AesGcmCtrV1) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if bytes.Compare(p.AadPrefix, other.AadPrefix) != 0 { return false } + if bytes.Compare(p.AadFileUnique, other.AadFileUnique) != 0 { return false } + if p.SupplyAadPrefix != other.SupplyAadPrefix { + if p.SupplyAadPrefix == nil || other.SupplyAadPrefix == nil { + return false + } + if (*p.SupplyAadPrefix) != (*other.SupplyAadPrefix) { return false } + } + return true +} + +func (p *AesGcmCtrV1) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AesGcmCtrV1(%+v)", *p) +} + +// Attributes: +// - AES_GCM_V1 +// - AES_GCM_CTR_V1 +type EncryptionAlgorithm struct { + AES_GCM_V1 *AesGcmV1 `thrift:"AES_GCM_V1,1" db:"AES_GCM_V1" json:"AES_GCM_V1,omitempty"` + AES_GCM_CTR_V1 *AesGcmCtrV1 `thrift:"AES_GCM_CTR_V1,2" db:"AES_GCM_CTR_V1" json:"AES_GCM_CTR_V1,omitempty"` +} + +func NewEncryptionAlgorithm() *EncryptionAlgorithm { + return &EncryptionAlgorithm{} +} + +var EncryptionAlgorithm_AES_GCM_V1_DEFAULT *AesGcmV1 +func (p *EncryptionAlgorithm) GetAES_GCM_V1() *AesGcmV1 { + if !p.IsSetAES_GCM_V1() { + return EncryptionAlgorithm_AES_GCM_V1_DEFAULT + } +return p.AES_GCM_V1 +} +var EncryptionAlgorithm_AES_GCM_CTR_V1_DEFAULT *AesGcmCtrV1 +func (p *EncryptionAlgorithm) GetAES_GCM_CTR_V1() *AesGcmCtrV1 { + if !p.IsSetAES_GCM_CTR_V1() { + return EncryptionAlgorithm_AES_GCM_CTR_V1_DEFAULT + } +return p.AES_GCM_CTR_V1 +} +func (p *EncryptionAlgorithm) CountSetFieldsEncryptionAlgorithm() int { + count := 0 + if (p.IsSetAES_GCM_V1()) { + count++ + } + if (p.IsSetAES_GCM_CTR_V1()) { + count++ + } + return count + +} + +func (p *EncryptionAlgorithm) IsSetAES_GCM_V1() bool { + return p.AES_GCM_V1 != nil +} + +func (p *EncryptionAlgorithm) IsSetAES_GCM_CTR_V1() bool { + return p.AES_GCM_CTR_V1 != nil +} + +func (p *EncryptionAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err 
:= iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *EncryptionAlgorithm) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.AES_GCM_V1 = &AesGcmV1{} + if err := p.AES_GCM_V1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.AES_GCM_V1), err) + } + return nil +} + +func (p *EncryptionAlgorithm) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.AES_GCM_CTR_V1 = &AesGcmCtrV1{} + if err := p.AES_GCM_CTR_V1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.AES_GCM_CTR_V1), err) + } + return nil +} + +func (p *EncryptionAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsEncryptionAlgorithm(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "EncryptionAlgorithm"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *EncryptionAlgorithm) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetAES_GCM_V1() { + if err := oprot.WriteFieldBegin(ctx, "AES_GCM_V1", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:AES_GCM_V1: ", p), err) } + if err := p.AES_GCM_V1.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.AES_GCM_V1), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:AES_GCM_V1: ", p), err) } + } + return err +} + +func (p *EncryptionAlgorithm) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetAES_GCM_CTR_V1() { + if err := oprot.WriteFieldBegin(ctx, "AES_GCM_CTR_V1", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:AES_GCM_CTR_V1: ", p), err) } + if err := p.AES_GCM_CTR_V1.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.AES_GCM_CTR_V1), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:AES_GCM_CTR_V1: ", p), err) }
+  }
+  return err
+}
+
+func (p *EncryptionAlgorithm) Equals(other *EncryptionAlgorithm) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.AES_GCM_V1.Equals(other.AES_GCM_V1) { return false }
+  if !p.AES_GCM_CTR_V1.Equals(other.AES_GCM_CTR_V1) { return false }
+  return true
+}
+
+func (p *EncryptionAlgorithm) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("EncryptionAlgorithm(%+v)", *p)
+}
+
+// Description for file metadata
+//
+// Attributes:
+// - Version: Version of this file *
+// - Schema: Parquet schema for this file. This schema contains metadata for all the columns.
+// The schema is represented as a tree with a single root. The nodes of the tree
+// are flattened to a list by doing a depth-first traversal.
+// The column metadata contains the path in the schema for that column which can be
+// used to map columns to nodes in the schema.
+// The first element is the root *
+// - NumRows: Number of rows in this file *
+// - RowGroups: Row groups in this file *
+// - KeyValueMetadata: Optional key/value metadata *
+// - CreatedBy: String for application that wrote this file. This should be in the format
+// <Application> version <App Version> (build <App Build Hash>).
+// e.g. impala version 1.0 (build 6cf94d29b2b7115df4de2c06e2ab4326d721eb55)
+//
+// - ColumnOrders: Sort order used for the min_value and max_value fields of each column in
+// this file. Sort orders are listed in the order matching the columns in the
+// schema. The indexes are not necessarily the same though, because only leaf
+// nodes of the schema are represented in the list of sort orders.
+//
+// Without column_orders, the meaning of the min_value and max_value fields is
+// undefined. To ensure well-defined behaviour, if min_value and max_value are
+// written to a Parquet file, column_orders must be written as well.
+//
+// The obsolete min and max fields are always sorted by signed comparison
+// regardless of column_orders.
+// - EncryptionAlgorithm: Encryption algorithm. This field is set only in encrypted files
+// with plaintext footer. Files with encrypted footer store algorithm id
+// in FileCryptoMetaData structure.
+// - FooterSigningKeyMetadata: Retrieval metadata of key used for signing the footer.
+// Used only in encrypted files with plaintext footer.
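+//
+// Note: in a Parquet file this structure is serialized with the Thrift compact
+// protocol and stored at the end of the file, immediately before the 4-byte
+// little-endian footer length and the closing "PAR1" magic bytes; files with
+// an encrypted footer store a FileCryptoMetaData there and end with "PARE"
+// instead.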
+type FileMetaData struct { + Version int32 `thrift:"version,1,required" db:"version" json:"version"` + Schema []*SchemaElement `thrift:"schema,2,required" db:"schema" json:"schema"` + NumRows int64 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` + RowGroups []*RowGroup `thrift:"row_groups,4,required" db:"row_groups" json:"row_groups"` + KeyValueMetadata []*KeyValue `thrift:"key_value_metadata,5" db:"key_value_metadata" json:"key_value_metadata,omitempty"` + CreatedBy *string `thrift:"created_by,6" db:"created_by" json:"created_by,omitempty"` + ColumnOrders []*ColumnOrder `thrift:"column_orders,7" db:"column_orders" json:"column_orders,omitempty"` + EncryptionAlgorithm *EncryptionAlgorithm `thrift:"encryption_algorithm,8" db:"encryption_algorithm" json:"encryption_algorithm,omitempty"` + FooterSigningKeyMetadata []byte `thrift:"footer_signing_key_metadata,9" db:"footer_signing_key_metadata" json:"footer_signing_key_metadata,omitempty"` +} + +func NewFileMetaData() *FileMetaData { + return &FileMetaData{} +} + + +func (p *FileMetaData) GetVersion() int32 { + return p.Version +} + +func (p *FileMetaData) GetSchema() []*SchemaElement { + return p.Schema +} + +func (p *FileMetaData) GetNumRows() int64 { + return p.NumRows +} + +func (p *FileMetaData) GetRowGroups() []*RowGroup { + return p.RowGroups +} +var FileMetaData_KeyValueMetadata_DEFAULT []*KeyValue + +func (p *FileMetaData) GetKeyValueMetadata() []*KeyValue { + return p.KeyValueMetadata +} +var FileMetaData_CreatedBy_DEFAULT string +func (p *FileMetaData) GetCreatedBy() string { + if !p.IsSetCreatedBy() { + return FileMetaData_CreatedBy_DEFAULT + } +return *p.CreatedBy +} +var FileMetaData_ColumnOrders_DEFAULT []*ColumnOrder + +func (p *FileMetaData) GetColumnOrders() []*ColumnOrder { + return p.ColumnOrders +} +var FileMetaData_EncryptionAlgorithm_DEFAULT *EncryptionAlgorithm +func (p *FileMetaData) GetEncryptionAlgorithm() *EncryptionAlgorithm { + if !p.IsSetEncryptionAlgorithm() { + return FileMetaData_EncryptionAlgorithm_DEFAULT + } +return p.EncryptionAlgorithm +} +var FileMetaData_FooterSigningKeyMetadata_DEFAULT []byte + +func (p *FileMetaData) GetFooterSigningKeyMetadata() []byte { + return p.FooterSigningKeyMetadata +} +func (p *FileMetaData) IsSetKeyValueMetadata() bool { + return p.KeyValueMetadata != nil +} + +func (p *FileMetaData) IsSetCreatedBy() bool { + return p.CreatedBy != nil +} + +func (p *FileMetaData) IsSetColumnOrders() bool { + return p.ColumnOrders != nil +} + +func (p *FileMetaData) IsSetEncryptionAlgorithm() bool { + return p.EncryptionAlgorithm != nil +} + +func (p *FileMetaData) IsSetFooterSigningKeyMetadata() bool { + return p.FooterSigningKeyMetadata != nil +} + +func (p *FileMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetVersion bool = false; + var issetSchema bool = false; + var issetNumRows bool = false; + var issetRowGroups bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetVersion = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == 
thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetSchema = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetNumRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetRowGroups = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.LIST { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetVersion{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Version is not set")); + } + if !issetSchema{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Schema is not set")); + } + if !issetNumRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); + } + if !issetRowGroups{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RowGroups is not set")); + } + return nil +} + +func (p *FileMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Version = v +} + return nil +} + +func (p *FileMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SchemaElement, 0, size) + p.Schema = tSlice + for i := 0; i < size; i ++ { + _elem24 := &SchemaElement{} + if err := _elem24.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem24), err) + } + p.Schema = append(p.Schema, _elem24) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *FileMetaData) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + 
return thrift.PrependError("error reading field 3: ", err) +} else { + p.NumRows = v +} + return nil +} + +func (p *FileMetaData) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*RowGroup, 0, size) + p.RowGroups = tSlice + for i := 0; i < size; i ++ { + _elem25 := &RowGroup{} + if err := _elem25.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem25), err) + } + p.RowGroups = append(p.RowGroups, _elem25) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *FileMetaData) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.KeyValueMetadata = tSlice + for i := 0; i < size; i ++ { + _elem26 := &KeyValue{} + if err := _elem26.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err) + } + p.KeyValueMetadata = append(p.KeyValueMetadata, _elem26) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *FileMetaData) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.CreatedBy = &v +} + return nil +} + +func (p *FileMetaData) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ColumnOrder, 0, size) + p.ColumnOrders = tSlice + for i := 0; i < size; i ++ { + _elem27 := &ColumnOrder{} + if err := _elem27.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem27), err) + } + p.ColumnOrders = append(p.ColumnOrders, _elem27) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *FileMetaData) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + p.EncryptionAlgorithm = &EncryptionAlgorithm{} + if err := p.EncryptionAlgorithm.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.EncryptionAlgorithm), err) + } + return nil +} + +func (p *FileMetaData) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.FooterSigningKeyMetadata = v +} + return nil +} + +func (p *FileMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "FileMetaData"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := 
p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *FileMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "version", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:version: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Version)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.version (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:version: ", p), err) } + return err +} + +func (p *FileMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "schema", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:schema: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Schema)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Schema { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:schema: ", p), err) } + return err +} + +func (p *FileMetaData) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.NumRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) } + return err +} + +func (p *FileMetaData) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "row_groups", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:row_groups: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.RowGroups)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RowGroups { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:row_groups: ", p), err) } + return err +} + +func (p *FileMetaData) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetKeyValueMetadata() { + if err := 
oprot.WriteFieldBegin(ctx, "key_value_metadata", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:key_value_metadata: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.KeyValueMetadata)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.KeyValueMetadata { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:key_value_metadata: ", p), err) } + } + return err +} + +func (p *FileMetaData) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCreatedBy() { + if err := oprot.WriteFieldBegin(ctx, "created_by", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:created_by: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CreatedBy)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.created_by (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:created_by: ", p), err) } + } + return err +} + +func (p *FileMetaData) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetColumnOrders() { + if err := oprot.WriteFieldBegin(ctx, "column_orders", thrift.LIST, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:column_orders: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ColumnOrders)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.ColumnOrders { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:column_orders: ", p), err) } + } + return err +} + +func (p *FileMetaData) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetEncryptionAlgorithm() { + if err := oprot.WriteFieldBegin(ctx, "encryption_algorithm", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:encryption_algorithm: ", p), err) } + if err := p.EncryptionAlgorithm.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.EncryptionAlgorithm), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:encryption_algorithm: ", p), err) } + } + return err +} + +func (p *FileMetaData) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetFooterSigningKeyMetadata() { + if err := oprot.WriteFieldBegin(ctx, "footer_signing_key_metadata", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:footer_signing_key_metadata: ", p), err) } + if err := oprot.WriteBinary(ctx, p.FooterSigningKeyMetadata); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.footer_signing_key_metadata (9) 
field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 9:footer_signing_key_metadata: ", p), err) }
+  }
+  return err
+}
+
+func (p *FileMetaData) Equals(other *FileMetaData) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Version != other.Version { return false }
+  if len(p.Schema) != len(other.Schema) { return false }
+  for i, _tgt := range p.Schema {
+    _src28 := other.Schema[i]
+    if !_tgt.Equals(_src28) { return false }
+  }
+  if p.NumRows != other.NumRows { return false }
+  if len(p.RowGroups) != len(other.RowGroups) { return false }
+  for i, _tgt := range p.RowGroups {
+    _src29 := other.RowGroups[i]
+    if !_tgt.Equals(_src29) { return false }
+  }
+  if len(p.KeyValueMetadata) != len(other.KeyValueMetadata) { return false }
+  for i, _tgt := range p.KeyValueMetadata {
+    _src30 := other.KeyValueMetadata[i]
+    if !_tgt.Equals(_src30) { return false }
+  }
+  if p.CreatedBy != other.CreatedBy {
+    if p.CreatedBy == nil || other.CreatedBy == nil {
+      return false
+    }
+    if (*p.CreatedBy) != (*other.CreatedBy) { return false }
+  }
+  if len(p.ColumnOrders) != len(other.ColumnOrders) { return false }
+  for i, _tgt := range p.ColumnOrders {
+    _src31 := other.ColumnOrders[i]
+    if !_tgt.Equals(_src31) { return false }
+  }
+  if !p.EncryptionAlgorithm.Equals(other.EncryptionAlgorithm) { return false }
+  if bytes.Compare(p.FooterSigningKeyMetadata, other.FooterSigningKeyMetadata) != 0 { return false }
+  return true
+}
+
+func (p *FileMetaData) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("FileMetaData(%+v)", *p)
+}
+
+// Crypto metadata for files with encrypted footer *
+//
+// Attributes:
+// - EncryptionAlgorithm: Encryption algorithm. This field is only used for files
+// with encrypted footer. Files with plaintext footer store algorithm id
+// inside footer (FileMetaData structure).
+// - KeyMetadata: Retrieval metadata of key used for encryption of footer, +// and (possibly) columns * +type FileCryptoMetaData struct { + EncryptionAlgorithm *EncryptionAlgorithm `thrift:"encryption_algorithm,1,required" db:"encryption_algorithm" json:"encryption_algorithm"` + KeyMetadata []byte `thrift:"key_metadata,2" db:"key_metadata" json:"key_metadata,omitempty"` +} + +func NewFileCryptoMetaData() *FileCryptoMetaData { + return &FileCryptoMetaData{} +} + +var FileCryptoMetaData_EncryptionAlgorithm_DEFAULT *EncryptionAlgorithm +func (p *FileCryptoMetaData) GetEncryptionAlgorithm() *EncryptionAlgorithm { + if !p.IsSetEncryptionAlgorithm() { + return FileCryptoMetaData_EncryptionAlgorithm_DEFAULT + } +return p.EncryptionAlgorithm +} +var FileCryptoMetaData_KeyMetadata_DEFAULT []byte + +func (p *FileCryptoMetaData) GetKeyMetadata() []byte { + return p.KeyMetadata +} +func (p *FileCryptoMetaData) IsSetEncryptionAlgorithm() bool { + return p.EncryptionAlgorithm != nil +} + +func (p *FileCryptoMetaData) IsSetKeyMetadata() bool { + return p.KeyMetadata != nil +} + +func (p *FileCryptoMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetEncryptionAlgorithm bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetEncryptionAlgorithm = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetEncryptionAlgorithm{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field EncryptionAlgorithm is not set")); + } + return nil +} + +func (p *FileCryptoMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.EncryptionAlgorithm = &EncryptionAlgorithm{} + if err := p.EncryptionAlgorithm.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.EncryptionAlgorithm), err) + } + return nil +} + +func (p *FileCryptoMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.KeyMetadata = v +} + return nil +} + +func (p *FileCryptoMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "FileCryptoMetaData"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return 
thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *FileCryptoMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "encryption_algorithm", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:encryption_algorithm: ", p), err) } + if err := p.EncryptionAlgorithm.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.EncryptionAlgorithm), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:encryption_algorithm: ", p), err) } + return err +} + +func (p *FileCryptoMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetKeyMetadata() { + if err := oprot.WriteFieldBegin(ctx, "key_metadata", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:key_metadata: ", p), err) } + if err := oprot.WriteBinary(ctx, p.KeyMetadata); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key_metadata (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:key_metadata: ", p), err) } + } + return err +} + +func (p *FileCryptoMetaData) Equals(other *FileCryptoMetaData) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.EncryptionAlgorithm.Equals(other.EncryptionAlgorithm) { return false } + if bytes.Compare(p.KeyMetadata, other.KeyMetadata) != 0 { return false } + return true +} + +func (p *FileCryptoMetaData) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FileCryptoMetaData(%+v)", *p) +} + diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf new file mode 100644 index 00000000..d714bfd8 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +checks = ["all", "-ST1005", "-ST1000"] diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go new file mode 100644 index 00000000..4700e512 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parquet + +import ( + "bytes" + "fmt" + "io" + + "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v12/internal/utils" +) + +// ReaderProperties are used to define how the file reader will handle buffering and allocating buffers +type ReaderProperties struct { + alloc memory.Allocator + // Default buffer size to utilize when reading chunks, when reading page + // headers or other metadata, this buffer may be increased if necessary + // to read in the necessary metadata. The value here is simply the default + // initial BufferSize when reading a new chunk. + BufferSize int64 + // create with NewFileDecryptionProperties if dealing with an encrypted file + FileDecryptProps *FileDecryptionProperties + // If this is set to true, then the reader will use SectionReader to + // just use the read stream when reading data. Otherwise we will buffer + // the data we're going to read into memory first and then read that buffer. + // + // If reading from higher latency IO, like S3, it might improve performance to + // set this to true in order to read the entire row group in at once rather than + // make multiple smaller data requests. For low latency IO streams or if only + // reading small portions / subsets of the parquet file, this can be set to false + // to reduce the amount of IO performed in order to avoid reading excess amounts of data. + BufferedStreamEnabled bool +} + +type BufferedReader interface { + Peek(int) ([]byte, error) + Discard(int) (int, error) + io.Reader +} + +// NewReaderProperties returns the default Reader Properties using the provided allocator. +// +// If nil is passed for the allocator, then memory.DefaultAllocator will be used. +func NewReaderProperties(alloc memory.Allocator) *ReaderProperties { + if alloc == nil { + alloc = memory.DefaultAllocator + } + return &ReaderProperties{alloc, DefaultBufSize, nil, false} +} + +// Allocator returns the allocator that the properties were initialized with +func (r *ReaderProperties) Allocator() memory.Allocator { return r.alloc } + +// GetStream returns a section of the underlying reader based on whether or not BufferedStream is enabled. +// +// If BufferedStreamEnabled is true, it creates an io.SectionReader, otherwise it will read the entire section +// into a buffer in memory and return a bytes.NewReader for that buffer. 
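+//
+// A minimal usage sketch (assumes f is an io.ReaderAt over an open parquet file):
+//
+//	props := parquet.NewReaderProperties(nil) // nil falls back to memory.DefaultAllocator
+//	props.BufferedStreamEnabled = true        // stream the section instead of buffering it
+//	rdr, err := props.GetStream(f, 0, 4096)   // BufferedReader over the first 4096 bytes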
+func (r *ReaderProperties) GetStream(source io.ReaderAt, start, nbytes int64) (BufferedReader, error) { + if r.BufferedStreamEnabled { + return utils.NewBufferedReader(io.NewSectionReader(source, start, nbytes), int(r.BufferSize)), nil + } + + data := make([]byte, nbytes) + n, err := source.ReadAt(data, start) + if err != nil { + return nil, fmt.Errorf("parquet: tried reading from file, but got error: %w", err) + } + if n != int(nbytes) { + return nil, fmt.Errorf("parquet: tried reading %d bytes starting at position %d from file but only got %d", nbytes, start, n) + } + + return utils.NewBufferedReader(bytes.NewReader(data), int(nbytes)), nil +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/tools.go b/vendor/github.com/apache/arrow/go/v12/parquet/tools.go new file mode 100644 index 00000000..b9ce84de --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/tools.go @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build tools + +package tools + +import ( + _ "github.com/klauspost/asmfmt/cmd/asmfmt" + _ "github.com/minio/asm2plan9s" + _ "github.com/minio/c2goasm" + _ "golang.org/x/tools/cmd/stringer" +) diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/types.go b/vendor/github.com/apache/arrow/go/v12/parquet/types.go new file mode 100644 index 00000000..0e6e6aec --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v12/parquet/types.go @@ -0,0 +1,391 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package parquet
+
+import (
+	"encoding/binary"
+	"io"
+	"reflect"
+	"strings"
+	"time"
+	"unsafe"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	format "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet"
+)
+
+const (
+	julianUnixEpoch int64 = 2440588
+	nanosPerDay     int64 = 3600 * 24 * 1000 * 1000 * 1000
+	// Int96SizeBytes is the number of bytes that make up an Int96
+	Int96SizeBytes int = 12
+)
+
+var (
+	// Int96Traits provides information about the Int96 type
+	Int96Traits int96Traits
+	// ByteArrayTraits provides information about the ByteArray type, which is just a []byte
+	ByteArrayTraits byteArrayTraits
+	// FixedLenByteArrayTraits provides information about the FixedLenByteArray type which is just a []byte
+	FixedLenByteArrayTraits fixedLenByteArrayTraits
+	// ByteArraySizeBytes is the number of bytes returned by reflect.TypeOf(ByteArray{}).Size()
+	ByteArraySizeBytes int = int(reflect.TypeOf(ByteArray{}).Size())
+	// FixedLenByteArraySizeBytes is the number of bytes returned by reflect.TypeOf(FixedLenByteArray{}).Size()
+	FixedLenByteArraySizeBytes int = int(reflect.TypeOf(FixedLenByteArray{}).Size())
+)
+
+// ReaderAtSeeker is a combination of the io.ReaderAt and io.Seeker interfaces
+// defining the only functionality that is required
+// in order for a parquet file to be read by the file functions. We just need
+// to be able to call ReadAt and Seek
+type ReaderAtSeeker interface {
+	io.ReaderAt
+	io.Seeker
+}
+
+// NewInt96 creates a new Int96 from the given 3 uint32 values.
+func NewInt96(v [3]uint32) (out Int96) {
+	binary.LittleEndian.PutUint32(out[0:], v[0])
+	binary.LittleEndian.PutUint32(out[4:], v[1])
+	binary.LittleEndian.PutUint32(out[8:], v[2])
+	return
+}
+
+// Int96 is a 12 byte integer value utilized for representing timestamps as a 64 bit integer and a 32 bit
+// integer.
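+//
+// The first 8 bytes hold the nanoseconds within the day (little endian) and the
+// last 4 bytes hold the Julian day number, as used by SetNanoSeconds and ToTime
+// below. A minimal sketch:
+//
+//	i96 := parquet.NewInt96([3]uint32{0, 0, 2440588}) // julianUnixEpoch
+//	t := i96.ToTime()                                 // 1970-01-01 00:00:00 +0000 UTC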
+type Int96 [12]byte
+
+// SetNanoSeconds sets the Nanosecond field of the Int96 timestamp to the provided value
+func (i96 *Int96) SetNanoSeconds(nanos int64) {
+	binary.LittleEndian.PutUint64(i96[:8], uint64(nanos))
+}
+
+// String provides the string representation as a timestamp via converting to a time.Time
+// and then calling String
+func (i96 Int96) String() string {
+	return i96.ToTime().String()
+}
+
+// ToTime returns a go time.Time object that represents the same time instant as the given Int96 value
+func (i96 Int96) ToTime() time.Time {
+	nanos := binary.LittleEndian.Uint64(i96[:8])
+	jdays := binary.LittleEndian.Uint32(i96[8:])
+
+	nanos = (uint64(jdays)-uint64(julianUnixEpoch))*uint64(nanosPerDay) + nanos
+	t := time.Unix(0, int64(nanos))
+	return t.UTC()
+}
+
+type int96Traits struct{}
+
+func (int96Traits) BytesRequired(n int) int { return Int96SizeBytes * n }
+
+func (int96Traits) CastFromBytes(b []byte) []Int96 {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []Int96
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len / Int96SizeBytes
+	s.Cap = h.Cap / Int96SizeBytes
+
+	return res
+}
+
+func (int96Traits) CastToBytes(b []Int96) []byte {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []byte
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len * Int96SizeBytes
+	s.Cap = h.Cap * Int96SizeBytes
+
+	return res
+}
+
+// ByteArray is a type to be utilized for representing the Parquet ByteArray physical type, represented as a byte slice
+type ByteArray []byte
+
+// Len returns the current length of the ByteArray, equivalent to len(bytearray)
+func (b ByteArray) Len() int {
+	return len(b)
+}
+
+// String returns a string representation of the ByteArray
+func (b ByteArray) String() string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+type byteArrayTraits struct{}
+
+func (byteArrayTraits) BytesRequired(n int) int {
+	return ByteArraySizeBytes * n
+}
+
+func (byteArrayTraits) CastFromBytes(b []byte) []ByteArray {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []ByteArray
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len / ByteArraySizeBytes
+	s.Cap = h.Cap / ByteArraySizeBytes
+
+	return res
+}
+
+// FixedLenByteArray is a go type to represent a FixedLengthByteArray as a byte slice
+type FixedLenByteArray []byte
+
+// Len returns the current length of this FixedLengthByteArray, equivalent to len(fixedlenbytearray)
+func (b FixedLenByteArray) Len() int {
+	return len(b)
+}
+
+// String returns a string representation of the FixedLenByteArray
+func (b FixedLenByteArray) String() string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+type fixedLenByteArrayTraits struct{}
+
+func (fixedLenByteArrayTraits) BytesRequired(n int) int {
+	return FixedLenByteArraySizeBytes * n
+}
+
+func (fixedLenByteArrayTraits) CastFromBytes(b []byte) []FixedLenByteArray {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+	var res []FixedLenByteArray
+	s := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+	s.Data = h.Data
+	s.Len = h.Len / FixedLenByteArraySizeBytes
+	s.Cap = h.Cap / FixedLenByteArraySizeBytes
+
+	return res
+}
+
+// Creating our own enums allows avoiding the transitive dependency on the
+// compiled thrift definitions in the public API, allowing us to not export
+// the entire Thrift definitions, while making everything a simple cast between.
+//
+// It also lets us add special values like NONE to distinguish between values
+// that are set or not set
+type (
+	// Type is the physical type as in parquet.thrift
+	Type format.Type
+	// Cipher is the parquet Cipher Algorithms
+	Cipher int
+	// ColumnOrder is the Column Order from the parquet.thrift
+	ColumnOrder *format.ColumnOrder
+	// Version is the parquet version type
+	Version int8
+	// DataPageVersion is the version of the Parquet Data Pages
+	DataPageVersion int8
+	// Encoding is the parquet Encoding type
+	Encoding format.Encoding
+	// Repetition is the underlying parquet field repetition type as in parquet.thrift
+	Repetition format.FieldRepetitionType
+	// ColumnPath is the path from the root of the schema to a given column
+	ColumnPath []string
+)
+
+func (c ColumnPath) String() string {
+	if c == nil {
+		return ""
+	}
+	return strings.Join(c, ".")
+}
+
+// Extend creates a new ColumnPath from an existing one, with the new ColumnPath having s appended to the end.
+func (c ColumnPath) Extend(s string) ColumnPath {
+	p := make([]string, len(c), len(c)+1)
+	copy(p, c)
+	return append(p, s)
+}
+
+// ColumnPathFromString constructs a ColumnPath from a dot separated string
+func ColumnPathFromString(s string) ColumnPath {
+	return strings.Split(s, ".")
+}
+
+// constants for choosing the Aes Algorithm to use for encryption/decryption
+const (
+	AesGcm Cipher = iota
+	AesCtr
+)
+
+// Constants for the parquet Version which governs which data types are allowed
+// and how they are represented. For example, uint32 data will be written differently
+// depending on this value (as INT64 for V1_0, as UINT32 for other versions).
+//
+// However, some features, such as compression algorithms, encryption,
+// or the improved v2 data page format, must be enabled separately in writer
+// properties.
+const (
+	// Enable only pre-2.2 parquet format features when writing.
+	//
+	// This is useful for maximum compatibility with legacy readers.
+	// Note that logical types may still be emitted, as long as they have
+	// a corresponding converted type.
+	V1_0 Version = iota // v1.0
+	// Enable parquet format 2.4 and earlier features when writing.
+	//
+	// This enables uint32 as well as logical types which don't have a
+	// corresponding converted type.
+	//
+	// Note: Parquet format 2.4.0 was released in October 2017
+	V2_4 // v2.4
+	// Enable Parquet format 2.6 and earlier features when writing.
+	//
+	// This enables the nanos time unit in addition to the V2_4 features.
+	//
+	// Note: Parquet format 2.6.0 was released in September 2018
+	V2_6 // v2.6
+	// Enable the latest parquet format 2.x features.
+	//
+	// This is equal to the greatest 2.x version supported by this library.
+	V2_LATEST = V2_6
+)
+
+// constants for the parquet DataPage Version to use
+const (
+	DataPageV1 DataPageVersion = iota
+	DataPageV2
+)
+
+func (e Encoding) String() string {
+	return format.Encoding(e).String()
+}
+
+var (
+	// Types contains constants for the Physical Types that are used in the Parquet Spec
+	//
+	// They can be specified when needed as such: `parquet.Types.Int32` etc. The values
+	// all correspond to the values in parquet.thrift
+	Types = struct {
+		Boolean           Type
+		Int32             Type
+		Int64             Type
+		Int96             Type
+		Float             Type
+		Double            Type
+		ByteArray         Type
+		FixedLenByteArray Type
+		// this only exists as a convenience so we can denote it when necessary
+		// nearly all functions that take a parquet.Type will error/panic if given
+		// Undefined
+		Undefined Type
+	}{
+		Boolean:           Type(format.Type_BOOLEAN),
+		Int32:             Type(format.Type_INT32),
+		Int64:             Type(format.Type_INT64),
+		Int96:             Type(format.Type_INT96),
+		Float:             Type(format.Type_FLOAT),
+		Double:            Type(format.Type_DOUBLE),
+		ByteArray:         Type(format.Type_BYTE_ARRAY),
+		FixedLenByteArray: Type(format.Type_FIXED_LEN_BYTE_ARRAY),
+		Undefined:         Type(format.Type_FIXED_LEN_BYTE_ARRAY + 1),
+	}
+
+	// Encodings contains constants for the encoding types of the column data
+	//
+	// The values used all correspond to the values in parquet.thrift for the
+	// corresponding encoding type.
+	Encodings = struct {
+		Plain                Encoding
+		PlainDict            Encoding
+		RLE                  Encoding
+		RLEDict              Encoding
+		BitPacked            Encoding // deprecated, not implemented
+		DeltaByteArray       Encoding
+		DeltaBinaryPacked    Encoding
+		DeltaLengthByteArray Encoding
+	}{
+		Plain:                Encoding(format.Encoding_PLAIN),
+		PlainDict:            Encoding(format.Encoding_PLAIN_DICTIONARY),
+		RLE:                  Encoding(format.Encoding_RLE),
+		RLEDict:              Encoding(format.Encoding_RLE_DICTIONARY),
+		BitPacked:            Encoding(format.Encoding_BIT_PACKED),
+		DeltaByteArray:       Encoding(format.Encoding_DELTA_BYTE_ARRAY),
+		DeltaBinaryPacked:    Encoding(format.Encoding_DELTA_BINARY_PACKED),
+		DeltaLengthByteArray: Encoding(format.Encoding_DELTA_LENGTH_BYTE_ARRAY),
+	}
+
+	// ColumnOrders contains constants for the Column Ordering fields
+	ColumnOrders = struct {
+		Undefined        ColumnOrder
+		TypeDefinedOrder ColumnOrder
+	}{
+		Undefined:        format.NewColumnOrder(),
+		TypeDefinedOrder: &format.ColumnOrder{TYPE_ORDER: format.NewTypeDefinedOrder()},
+	}
+
+	// DefaultColumnOrder is to use TypeDefinedOrder
+	DefaultColumnOrder = ColumnOrders.TypeDefinedOrder
+
+	// Repetitions contains the constants for Field Repetition Types
+	Repetitions = struct {
+		Required  Repetition
+		Optional  Repetition
+		Repeated  Repetition
+		Undefined Repetition // convenience value
+	}{
+		Required:  Repetition(format.FieldRepetitionType_REQUIRED),
+		Optional:  Repetition(format.FieldRepetitionType_OPTIONAL),
+		Repeated:  Repetition(format.FieldRepetitionType_REPEATED),
+		Undefined: Repetition(format.FieldRepetitionType_REPEATED + 1),
+	}
+)
+
+func (t Type) String() string {
+	switch t {
+	case Types.Undefined:
+		return "UNDEFINED"
+	default:
+		return format.Type(t).String()
+	}
+}
+
+func (r Repetition) String() string {
+	return strings.ToLower(format.FieldRepetitionType(r).String())
+}
+
+// ByteSize returns the number of bytes required to store a single value of
+// the given parquet.Type in memory.
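+//
+// For example (sizes follow from the constants and arrow size values above):
+//
+//	parquet.Types.Boolean.ByteSize() // 1
+//	parquet.Types.Int32.ByteSize()   // 4
+//	parquet.Types.Int96.ByteSize()   // 12 (Int96SizeBytes)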
+func (t Type) ByteSize() int {
+	switch t {
+	case Types.Boolean:
+		return 1
+	case Types.Int32:
+		return arrow.Int32SizeBytes
+	case Types.Int64:
+		return arrow.Int64SizeBytes
+	case Types.Int96:
+		return Int96SizeBytes
+	case Types.Float:
+		return arrow.Float32SizeBytes
+	case Types.Double:
+		return arrow.Float64SizeBytes
+	case Types.ByteArray:
+		return ByteArraySizeBytes
+	case Types.FixedLenByteArray:
+		return FixedLenByteArraySizeBytes
+	}
+	panic("no bytesize info for type")
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go b/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go
new file mode 100644
index 00000000..ab01aa48
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Version -linecomment"; DO NOT EDIT.
+
+package parquet
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[V1_0-0]
+	_ = x[V2_4-1]
+	_ = x[V2_6-2]
+}
+
+const _Version_name = "v1.0v2.4v2.6"
+
+var _Version_index = [...]uint8{0, 4, 8, 12}
+
+func (i Version) String() string {
+	if i < 0 || i >= Version(len(_Version_index)-1) {
+		return "Version(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Version_name[_Version_index[i]:_Version_index[i+1]]
+}
diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go
new file mode 100644
index 00000000..3faca39f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go
@@ -0,0 +1,533 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parquet
+
+import (
+	"github.com/apache/arrow/go/v12/arrow/memory"
+	"github.com/apache/arrow/go/v12/parquet/compress"
+)
+
+// Constants for default property values used for the default reader, writer and column props.
+const (
+	// Default Buffer size used for the Reader
+	DefaultBufSize int64 = 4096 * 4
+	// Default data page size limit is 1MB; it's not guaranteed, but we will try to
+	// cut data pages off at this size where possible.
+	DefaultDataPageSize int64 = 1024 * 1024
+	// Default is for dictionary encoding to be turned on, use WithDictionaryDefault
+	// writer property to change that.
+	DefaultDictionaryEnabled = true
+	// If the dictionary reaches the size of this limitation, the writer will use
+	// the fallback encoding (usually plain) instead of continuing to build the
+	// dictionary index.
+	DefaultDictionaryPageSizeLimit = DefaultDataPageSize
+	// In order to attempt to facilitate data page size limits for writing,
+	// data is written in batches.
Increasing the batch size may improve performance + // but the larger the batch size, the easier it is to overshoot the datapage limit. + DefaultWriteBatchSize int64 = 1024 + // Default maximum number of rows for a single row group + DefaultMaxRowGroupLen int64 = 64 * 1024 * 1024 + // Default is to have stats enabled for all columns, use writer properties to + // change the default, or to enable/disable for specific columns. + DefaultStatsEnabled = true + // If the stats are larger than 4K the writer will skip writing them out anyways. + DefaultMaxStatsSize int64 = 4096 + DefaultCreatedBy = "parquet-go version 12.0.1" + DefaultRootName = "schema" +) + +// ColumnProperties defines the encoding, codec, and so on for a given column. +type ColumnProperties struct { + Encoding Encoding + Codec compress.Compression + DictionaryEnabled bool + StatsEnabled bool + MaxStatsSize int64 + CompressionLevel int +} + +// DefaultColumnProperties returns the default properties which get utilized for writing. +// +// The default column properties are the following constants: +// Encoding: Encodings.Plain +// Codec: compress.Codecs.Uncompressed +// DictionaryEnabled: DefaultDictionaryEnabled +// StatsEnabled: DefaultStatsEnabled +// MaxStatsSize: DefaultMaxStatsSize +// CompressionLevel: compress.DefaultCompressionLevel +func DefaultColumnProperties() ColumnProperties { + return ColumnProperties{ + Encoding: Encodings.Plain, + Codec: compress.Codecs.Uncompressed, + DictionaryEnabled: DefaultDictionaryEnabled, + StatsEnabled: DefaultStatsEnabled, + MaxStatsSize: DefaultMaxStatsSize, + CompressionLevel: compress.DefaultCompressionLevel, + } +} + +type writerPropConfig struct { + wr *WriterProperties + encodings map[string]Encoding + codecs map[string]compress.Compression + compressLevel map[string]int + dictEnabled map[string]bool + statsEnabled map[string]bool +} + +// WriterProperty is used as the options for building a writer properties instance +type WriterProperty func(*writerPropConfig) + +// WithAllocator specifies the writer to use the given allocator +func WithAllocator(mem memory.Allocator) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.mem = mem + } +} + +// WithDictionaryDefault sets the default value for whether to enable dictionary encoding +func WithDictionaryDefault(dict bool) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.defColumnProps.DictionaryEnabled = dict + } +} + +// WithDictionaryFor allows enabling or disabling dictionary encoding for a given column path string +func WithDictionaryFor(path string, dict bool) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.dictEnabled[path] = dict + } +} + +// WithDictionaryPath is like WithDictionaryFor, but takes a ColumnPath type +func WithDictionaryPath(path ColumnPath, dict bool) WriterProperty { + return WithDictionaryFor(path.String(), dict) +} + +// WithDictionaryPageSizeLimit is the limit of the dictionary at which the writer +// will fallback to plain encoding instead +func WithDictionaryPageSizeLimit(limit int64) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.dictPagesize = limit + } +} + +// WithBatchSize specifies the number of rows to use for batch writes to columns +func WithBatchSize(batch int64) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.batchSize = batch + } +} + +// WithMaxRowGroupLength specifies the number of rows as the maximum number of rows for a given row group in the writer. 
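+//
+// A minimal sketch (the row count here is arbitrary, not a recommendation):
+//
+//	props := parquet.NewWriterProperties(parquet.WithMaxRowGroupLength(128 * 1024))
+//	_ = props.MaxRowGroupLength() // 131072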
+func WithMaxRowGroupLength(nrows int64) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.maxRowGroupLen = nrows + } +} + +// WithDataPageSize specifies the size to use for splitting data pages for column writing. +func WithDataPageSize(pgsize int64) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.pageSize = pgsize + } +} + +// WithDataPageVersion specifies whether to use Version 1 or Version 2 of the DataPage spec +func WithDataPageVersion(version DataPageVersion) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.dataPageVersion = version + } +} + +// WithVersion specifies which Parquet Spec version to utilize for writing. +func WithVersion(version Version) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.parquetVersion = version + } +} + +// WithCreatedBy specifies the "created by" string to use for the writer +func WithCreatedBy(createdby string) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.createdBy = createdby + } +} + +// WithRootName enables customization of the name used for the root schema node. This is required +// to maintain compatibility with other tools. +func WithRootName(name string) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.rootName = name + } +} + +// WithRootRepetition enables customization of the repetition used for the root schema node. +// This is required to maintain compatibility with other tools. +func WithRootRepetition(repetition Repetition) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.rootRepetition = repetition + } +} + +// WithEncoding defines the encoding that is used when we aren't using dictionary encoding. +// +// This is either applied if dictionary encoding is disabled, or if we fallback if the dictionary +// grew too large. +func WithEncoding(encoding Encoding) WriterProperty { + return func(cfg *writerPropConfig) { + if encoding == Encodings.PlainDict || encoding == Encodings.RLEDict { + panic("parquet: can't use dictionary encoding as fallback encoding") + } + cfg.wr.defColumnProps.Encoding = encoding + } +} + +// WithEncodingFor is for defining the encoding only for a specific column path. This encoding will be used +// if dictionary encoding is disabled for the column or if we fallback because the dictionary grew too large +func WithEncodingFor(path string, encoding Encoding) WriterProperty { + return func(cfg *writerPropConfig) { + if encoding == Encodings.PlainDict || encoding == Encodings.RLEDict { + panic("parquet: can't use dictionary encoding as fallback encoding") + } + cfg.encodings[path] = encoding + } +} + +// WithEncodingPath is the same as WithEncodingFor but takes a ColumnPath directly. +func WithEncodingPath(path ColumnPath, encoding Encoding) WriterProperty { + return WithEncodingFor(path.String(), encoding) +} + +// WithCompression specifies the default compression type to use for column writing. +func WithCompression(codec compress.Compression) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.defColumnProps.Codec = codec + } +} + +// WithCompressionFor specifies the compression type for the given column. +func WithCompressionFor(path string, codec compress.Compression) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.codecs[path] = codec + } +} + +// WithCompressionPath is the same as WithCompressionFor but takes a ColumnPath directly. 
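+//
+// A minimal sketch (assumes the schema has a column at the dotted path "a.b";
+// the Snappy codec is illustrative only):
+//
+//	props := parquet.NewWriterProperties(
+//		parquet.WithCompressionPath(parquet.ColumnPathFromString("a.b"), compress.Codecs.Snappy),
+//	)
+//	_ = props.CompressionPath(parquet.ColumnPathFromString("a.b")) // compress.Codecs.Snappy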
+func WithCompressionPath(path ColumnPath, codec compress.Compression) WriterProperty { + return WithCompressionFor(path.String(), codec) +} + +// WithMaxStatsSize sets a maximum size for the statistics before we decide not to include them. +func WithMaxStatsSize(maxStatsSize int64) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.defColumnProps.MaxStatsSize = maxStatsSize + } +} + +// WithCompressionLevel specifies the default compression level for the compressor in every column. +// +// The provided compression level is compressor specific. The user would have to know what the available +// levels are for the selected compressor. If the compressor does not allow for selecting different +// compression levels, then this function will have no effect. Parquet and Arrow will not validate the +// passed compression level. If no level is selected by the user or if the special compress.DefaultCompressionLevel +// value is used, then parquet will select the compression level. +func WithCompressionLevel(level int) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.defColumnProps.CompressionLevel = level + } +} + +// WithCompressionLevelFor is like WithCompressionLevel but only for the given column path. +func WithCompressionLevelFor(path string, level int) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.compressLevel[path] = level + } +} + +// WithCompressionLevelPath is the same as WithCompressionLevelFor but takes a ColumnPath +func WithCompressionLevelPath(path ColumnPath, level int) WriterProperty { + return WithCompressionLevelFor(path.String(), level) +} + +// WithStats specifies a default for whether or not to enable column statistics. +func WithStats(enabled bool) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.defColumnProps.StatsEnabled = enabled + } +} + +// WithStatsFor specifies a per column value as to enable or disable statistics in the resulting file. +func WithStatsFor(path string, enabled bool) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.statsEnabled[path] = enabled + } +} + +// WithStatsPath is the same as WithStatsFor but takes a ColumnPath +func WithStatsPath(path ColumnPath, enabled bool) WriterProperty { + return WithStatsFor(path.String(), enabled) +} + +// WithEncryptionProperties specifies the file level encryption handling for writing the file. +func WithEncryptionProperties(props *FileEncryptionProperties) WriterProperty { + return func(cfg *writerPropConfig) { + cfg.wr.encryptionProps = props + } +} + +// WriterProperties is the collection of properties to use for writing a parquet file. The values are +// read only once it has been constructed. 
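+//
+// A minimal construction sketch using the options above (values are illustrative):
+//
+//	props := parquet.NewWriterProperties(
+//		parquet.WithVersion(parquet.V2_4),
+//		parquet.WithDictionaryDefault(false),
+//		parquet.WithDataPageSize(512 * 1024),
+//	)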
+type WriterProperties struct {
+	mem             memory.Allocator
+	dictPagesize    int64
+	batchSize       int64
+	maxRowGroupLen  int64
+	pageSize        int64
+	parquetVersion  Version
+	createdBy       string
+	dataPageVersion DataPageVersion
+	rootName        string
+	rootRepetition  Repetition
+
+	defColumnProps  ColumnProperties
+	columnProps     map[string]*ColumnProperties
+	encryptionProps *FileEncryptionProperties
+}
+
+func defaultWriterProperties() *WriterProperties {
+	return &WriterProperties{
+		mem:             memory.DefaultAllocator,
+		dictPagesize:    DefaultDictionaryPageSizeLimit,
+		batchSize:       DefaultWriteBatchSize,
+		maxRowGroupLen:  DefaultMaxRowGroupLen,
+		pageSize:        DefaultDataPageSize,
+		parquetVersion:  V2_LATEST,
+		dataPageVersion: DataPageV1,
+		createdBy:       DefaultCreatedBy,
+		rootName:        DefaultRootName,
+		rootRepetition:  Repetitions.Repeated,
+		defColumnProps:  DefaultColumnProperties(),
+	}
+}
+
+// NewWriterProperties takes a list of options for building the properties. If multiple options are used which conflict
+// then the last option is the one which will take effect. If no WriterProperty options are provided, then the default
+// properties will be utilized for writing.
+//
+// The Default properties use the following constants:
+//	Allocator: memory.DefaultAllocator
+//	DictionaryPageSize: DefaultDictionaryPageSizeLimit
+//	BatchSize: DefaultWriteBatchSize
+//	MaxRowGroupLength: DefaultMaxRowGroupLen
+//	PageSize: DefaultDataPageSize
+//	ParquetVersion: V2_LATEST
+//	DataPageVersion: DataPageV1
+//	CreatedBy: DefaultCreatedBy
+func NewWriterProperties(opts ...WriterProperty) *WriterProperties {
+	cfg := writerPropConfig{
+		wr:            defaultWriterProperties(),
+		encodings:     make(map[string]Encoding),
+		codecs:        make(map[string]compress.Compression),
+		compressLevel: make(map[string]int),
+		dictEnabled:   make(map[string]bool),
+		statsEnabled:  make(map[string]bool),
+	}
+	for _, o := range opts {
+		o(&cfg)
+	}
+
+	cfg.wr.columnProps = make(map[string]*ColumnProperties)
+	get := func(key string) *ColumnProperties {
+		if p, ok := cfg.wr.columnProps[key]; ok {
+			return p
+		}
+		cfg.wr.columnProps[key] = new(ColumnProperties)
+		*cfg.wr.columnProps[key] = cfg.wr.defColumnProps
+		return cfg.wr.columnProps[key]
+	}
+
+	for key, value := range cfg.encodings {
+		get(key).Encoding = value
+	}
+
+	for key, value := range cfg.codecs {
+		get(key).Codec = value
+	}
+
+	for key, value := range cfg.compressLevel {
+		get(key).CompressionLevel = value
+	}
+
+	for key, value := range cfg.dictEnabled {
+		get(key).DictionaryEnabled = value
+	}
+
+	for key, value := range cfg.statsEnabled {
+		get(key).StatsEnabled = value
+	}
+	return cfg.wr
+}
+
+// FileEncryptionProperties returns the current encryption properties that were
+// used to create the writer properties.
+func (w *WriterProperties) FileEncryptionProperties() *FileEncryptionProperties { + return w.encryptionProps +} + +func (w *WriterProperties) Allocator() memory.Allocator { return w.mem } +func (w *WriterProperties) CreatedBy() string { return w.createdBy } +func (w *WriterProperties) RootName() string { return w.rootName } +func (w *WriterProperties) RootRepetition() Repetition { return w.rootRepetition } +func (w *WriterProperties) WriteBatchSize() int64 { return w.batchSize } +func (w *WriterProperties) DataPageSize() int64 { return w.pageSize } +func (w *WriterProperties) DictionaryPageSizeLimit() int64 { return w.dictPagesize } +func (w *WriterProperties) Version() Version { return w.parquetVersion } +func (w *WriterProperties) DataPageVersion() DataPageVersion { return w.dataPageVersion } +func (w *WriterProperties) MaxRowGroupLength() int64 { return w.maxRowGroupLen } + +// Compression returns the default compression type that will be used for any columns that don't +// have a specific compression defined. +func (w *WriterProperties) Compression() compress.Compression { return w.defColumnProps.Codec } + +// CompressionFor will return the compression type that is specified for the given column path, or +// the default compression codec if there isn't one specific to this column. +func (w *WriterProperties) CompressionFor(path string) compress.Compression { + if p, ok := w.columnProps[path]; ok { + return p.Codec + } + return w.defColumnProps.Codec +} + +//CompressionPath is the same as CompressionFor but takes a ColumnPath +func (w *WriterProperties) CompressionPath(path ColumnPath) compress.Compression { + return w.CompressionFor(path.String()) +} + +// CompressionLevel returns the default compression level that will be used for any column +// that doesn't have a compression level specified for it. +func (w *WriterProperties) CompressionLevel() int { return w.defColumnProps.CompressionLevel } + +// CompressionLevelFor returns the compression level that will be utilized for the given column, +// or the default compression level if the column doesn't have a specific level specified. +func (w *WriterProperties) CompressionLevelFor(path string) int { + if p, ok := w.columnProps[path]; ok { + return p.CompressionLevel + } + return w.defColumnProps.CompressionLevel +} + +// CompressionLevelPath is the same as CompressionLevelFor but takes a ColumnPath object +func (w *WriterProperties) CompressionLevelPath(path ColumnPath) int { + return w.CompressionLevelFor(path.String()) +} + +// Encoding returns the default encoding that will be utilized for any columns which don't have a different value +// specified. +func (w *WriterProperties) Encoding() Encoding { return w.defColumnProps.Encoding } + +// EncodingFor returns the encoding that will be used for the given column path, or the default encoding if there +// isn't one specified for this column. +func (w *WriterProperties) EncodingFor(path string) Encoding { + if p, ok := w.columnProps[path]; ok { + return p.Encoding + } + return w.defColumnProps.Encoding +} + +// EncodingPath is the same as EncodingFor but takes a ColumnPath object +func (w *WriterProperties) EncodingPath(path ColumnPath) Encoding { + return w.EncodingFor(path.String()) +} + +// DictionaryIndexEncoding returns which encoding will be used for the Dictionary Index values based on the +// parquet version. 
V1 uses PlainDict and V2 uses RLEDict +func (w *WriterProperties) DictionaryIndexEncoding() Encoding { + if w.parquetVersion == V1_0 { + return Encodings.PlainDict + } + return Encodings.RLEDict +} + +// DictionaryPageEncoding returns the encoding that will be utilized for the DictionaryPage itself based on the parquet +// version. V1 uses PlainDict, v2 uses Plain +func (w *WriterProperties) DictionaryPageEncoding() Encoding { + if w.parquetVersion == V1_0 { + return Encodings.PlainDict + } + return Encodings.Plain +} + +// DictionaryEnabled returns the default value as for whether or not dictionary encoding will be utilized for columns +// that aren't separately specified. +func (w *WriterProperties) DictionaryEnabled() bool { return w.defColumnProps.DictionaryEnabled } + +// DictionaryEnabledFor returns whether or not dictionary encoding will be used for the specified column when writing +// or the default value if the column was not separately specified. +func (w *WriterProperties) DictionaryEnabledFor(path string) bool { + if p, ok := w.columnProps[path]; ok { + return p.DictionaryEnabled + } + return w.defColumnProps.DictionaryEnabled +} + +// DictionaryEnabledPath is the same as DictionaryEnabledFor but takes a ColumnPath object. +func (w *WriterProperties) DictionaryEnabledPath(path ColumnPath) bool { + return w.DictionaryEnabledFor(path.String()) +} + +// StatisticsEnabled returns the default value for whether or not stats are enabled to be written for columns +// that aren't separately specified. +func (w *WriterProperties) StatisticsEnabled() bool { return w.defColumnProps.StatsEnabled } + +// StatisticsEnabledFor returns whether stats will be written for the given column path, or the default value if +// it wasn't separately specified. +func (w *WriterProperties) StatisticsEnabledFor(path string) bool { + if p, ok := w.columnProps[path]; ok { + return p.StatsEnabled + } + return w.defColumnProps.StatsEnabled +} + +// StatisticsEnabledPath is the same as StatisticsEnabledFor but takes a ColumnPath object. +func (w *WriterProperties) StatisticsEnabledPath(path ColumnPath) bool { + return w.StatisticsEnabledFor(path.String()) +} + +// MaxStatsSize returns the default maximum size for stats +func (w *WriterProperties) MaxStatsSize() int64 { return w.defColumnProps.MaxStatsSize } + +// MaxStatsSizeFor returns the maximum stat size for the given column path +func (w *WriterProperties) MaxStatsSizeFor(path string) int64 { + if p, ok := w.columnProps[path]; ok { + return p.MaxStatsSize + } + return w.defColumnProps.MaxStatsSize +} + +// MaxStatsSizePath is the same as MaxStatsSizeFor but takes a ColumnPath +func (w *WriterProperties) MaxStatsSizePath(path ColumnPath) int64 { + return w.MaxStatsSizeFor(path.String()) +} + +// ColumnEncryptionProperties returns the specific properties for encryption that will be used for the given column path +func (w *WriterProperties) ColumnEncryptionProperties(path string) *ColumnEncryptionProperties { + if w.encryptionProps != nil { + return w.encryptionProps.ColumnEncryptionProperties(path) + } + return nil +} diff --git a/vendor/github.com/apache/thrift/LICENSE b/vendor/github.com/apache/thrift/LICENSE new file mode 100644 index 00000000..2bc6fbbf --- /dev/null +++ b/vendor/github.com/apache/thrift/LICENSE @@ -0,0 +1,306 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. + +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- diff --git a/vendor/github.com/apache/thrift/NOTICE b/vendor/github.com/apache/thrift/NOTICE new file mode 100644 index 00000000..37824e7f --- /dev/null +++ b/vendor/github.com/apache/thrift/NOTICE @@ -0,0 +1,5 @@ +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go new file mode 100644 index 00000000..8b8137ae --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "strings" +) + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 + INVALID_TRANSFORM = 8 + INVALID_PROTOCOL = 9 + UNSUPPORTED_CLIENT_TYPE = 10 + VALIDATION_FAILED = 11 +) + +var defaultApplicationExceptionMessage = map[int32]string{ + UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", + UNKNOWN_METHOD: "unknown method", + INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", + WRONG_METHOD_NAME: "wrong method name", + BAD_SEQUENCE_ID: "bad sequence ID", + MISSING_RESULT: "missing result", + INTERNAL_ERROR: "unknown internal error", + PROTOCOL_ERROR: "unknown protocol error", + INVALID_TRANSFORM: "Invalid transform", + INVALID_PROTOCOL: "Invalid protocol", + UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", + VALIDATION_FAILED: "validation failed", +} + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(ctx context.Context, iprot TProtocol) error + Write(ctx context.Context, oprot TProtocol) error +} + +type ValidationError struct { + message string + check string + fieldSymbol string +} + +func (e *ValidationError) Check() string { + return e.check +} + +func (e *ValidationError) TypeName() string { + return strings.Split(e.fieldSymbol, ".")[0] +} + +func (e *ValidationError) Field() string { + if fs := strings.Split(e.fieldSymbol, "."); len(fs) > 1 { + return fs[1] + } + return e.fieldSymbol +} + +func (e *ValidationError) FieldSymbol() string { + return e.fieldSymbol +} + +func (e ValidationError) Error() string { + return e.message +} + +type tApplicationException struct { + message string + type_ int32 + err error +} + +var _ TApplicationException = (*tApplicationException)(nil) + +func (tApplicationException) TExceptionType() TExceptionType { + return TExceptionTypeApplication +} + +func (e tApplicationException) Error() string { + if e.message != "" { + return e.message + } + return defaultApplicationExceptionMessage[e.type_] +} + +func (e tApplicationException) Unwrap() error { + return e.err +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_, nil} +} + +func NewValidationException(type_ int32, check string, field string, message string) TApplicationException { + return &tApplicationException{ + type_: type_, + message: message, + err: &ValidationError{message: message, check: check, fieldSymbol: field}, + } +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { + // TODO: this should really be generated by the compiler + _, err := iprot.ReadStructBegin(ctx) + if err != nil { + return err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(ctx); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(ctx); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + default: + if err = 
SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + if err = iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return err + } + + p.message = message + p.type_ = type_ + + return nil +} + +func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { + err = oprot.WriteStructBegin(ctx, "TApplicationException") + if err != nil { + return + } + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(ctx, p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + } + err = oprot.WriteFieldBegin(ctx, "type", I32, 2) + if err != nil { + return + } + err = oprot.WriteI32(ctx, p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + err = oprot.WriteFieldStop(ctx) + if err != nil { + return + } + err = oprot.WriteStructEnd(ctx) + return +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go new file mode 100644 index 00000000..eded9313 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go @@ -0,0 +1,561 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + cfg *TConfiguration + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { + PropagateTConfiguration(t, conf) + p := &TBinaryProtocol{ + origTransport: t, + cfg: conf, + } + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + return p +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. 
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{ + cfg: conf, + } +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocolConf(t, p.cfg) +} + +func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + if p.cfg.GetTBinaryStrictWrite() { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(ctx, int32(version)) + if e != nil { + return e + } + e = p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } else { + e := p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } +} + +func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + e := p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(ctx, id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { + e := p.WriteByte(ctx, STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + e := p.WriteByte(ctx, int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { + if value { + return p.WriteByte(ctx, 1) + } + return p.WriteByte(ctx, 0) +} + +func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { + v := p.buffer[0:4] + 
binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.trans.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.WriteI64(ctx, int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.Write(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteUUID(ctx context.Context, value Tuuid) error { + _, err := p.trans.Write(value[:]) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.cfg.GetTBinaryStrictRead() { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte(ctx) + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32(ctx) + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte(ctx) + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16(ctx) + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { + k, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + err = checkSizeForProtocol(size32, p.cfg) + if err != nil { + return + } + size = int(size32) + return kType, vType, size, nil +} 
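+
+// A minimal worked example (not part of the upstream Thrift sources): the
+// strict binary message header consumed by ReadMessageBegin above is four
+// fixed-width fields. Assuming VERSION_1 = 0x80010000 (as defined elsewhere
+// in this package) and a CALL (type 1) message named "ping" with sequence
+// id 7, the bytes on the wire would be:
+//
+//	0x80 0x01 0x00 0x01  // int32: VERSION_1 | CALL
+//	0x00 0x00 0x00 0x04  // int32: method name length (4)
+//	0x70 0x69 0x6e 0x67  // "ping" (UTF-8)
+//	0x00 0x00 0x00 0x07  // int32: sequence id (7)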
+ +func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + err = checkSizeForProtocol(size32, p.cfg) + if err != nil { + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + err = checkSizeForProtocol(size32, p.cfg) + if err != nil { + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { + b, e := p.ReadByte(ctx) + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(ctx, buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(ctx, buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", e + } + err = checkSizeForProtocol(size, p.cfg) + if err != nil { + return + } + if size == 0 { + return "", nil + } + if size < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:size] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + size, e := p.ReadI32(ctx) + if e != nil { + return nil, e + } + if err := checkSizeForProtocol(size, p.cfg); err != nil { + return nil, err + } + + buf, err := safeReadBytes(size, p.trans) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) ReadUUID(ctx context.Context) (value Tuuid, err error) { + buf := p.buffer[0:16] + err = p.readAll(ctx, buf) + if err == nil { + copy(value[:], buf) + } + return value, err +} + +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TBinaryProtocol) Transport() 
TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { + var read int + _, deadlineSet := ctx.Deadline() + for { + read, err = io.ReadFull(p.trans, buf) + if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout without anything read, + // and we still have time left, keep retrying. + continue + } + // For anything else, don't retry + break + } + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + buf, err := safeReadBytes(size, p.trans) + return string(buf), NewTProtocolException(err) +} + +func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) + _ TConfigurationSetter = (*TBinaryProtocol)(nil) +) + +// This function is shared between TBinaryProtocol and TCompactProtocol. +// +// It tries to read size bytes from trans, in a way that prevents large +// allocations when size is insanely large (mostly caused by malformed message). +func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { + if size < 0 { + return nil, nil + } + + buf := new(bytes.Buffer) + _, err := io.CopyN(buf, trans, int64(size)) + return buf.Bytes(), err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go new file mode 100644 index 00000000..aa551b4a --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" + "context" +) + +type TBufferedTransportFactory struct { + size int +} + +type TBufferedTransport struct { + bufio.ReadWriter + tp TTransport +} + +func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTBufferedTransport(trans, p.size), nil +} + +func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { + return &TBufferedTransportFactory{size: bufferSize} +} + +func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { + return &TBufferedTransport{ + ReadWriter: bufio.ReadWriter{ + Reader: bufio.NewReaderSize(trans, bufferSize), + Writer: bufio.NewWriterSize(trans, bufferSize), + }, + tp: trans, + } +} + +func (p *TBufferedTransport) IsOpen() bool { + return p.tp.IsOpen() +} + +func (p *TBufferedTransport) Open() (err error) { + return p.tp.Open() +} + +func (p *TBufferedTransport) Close() (err error) { + return p.tp.Close() +} + +func (p *TBufferedTransport) Read(b []byte) (int, error) { + n, err := p.ReadWriter.Read(b) + if err != nil { + p.ReadWriter.Reader.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Write(b []byte) (int, error) { + n, err := p.ReadWriter.Write(b) + if err != nil { + p.ReadWriter.Writer.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Flush(ctx context.Context) error { + if err := p.ReadWriter.Flush(); err != nil { + p.ReadWriter.Writer.Reset(p.tp) + return err + } + return p.tp.Flush(ctx) +} + +func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { + return p.tp.RemainingBytes() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.tp, conf) +} + +var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client.go b/vendor/github.com/apache/thrift/lib/go/thrift/client.go new file mode 100644 index 00000000..ea2c01fd --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/client.go @@ -0,0 +1,109 @@ +package thrift + +import ( + "context" + "fmt" +) + +// ResponseMeta represents the metadata attached to the response. +type ResponseMeta struct { + // The headers in the response, if any. + // If the underlying transport/protocol is not THeader, this will always be nil. + Headers THeaderMap +} + +type TClient interface { + Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +type TStandardClient struct { + seqId int32 + iprot, oprot TProtocol +} + +// TStandardClient implements TClient, and uses the standard message format for Thrift. +// It is not safe for concurrent use. 
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { + return &TStandardClient{ + iprot: inputProtocol, + oprot: outputProtocol, + } +} + +func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { + // Set headers from context object on THeaderProtocol + if headerProt, ok := oprot.(*THeaderProtocol); ok { + headerProt.ClearWriteHeaders() + for _, key := range GetWriteHeaderList(ctx) { + if value, ok := GetHeader(ctx, key); ok { + headerProt.SetWriteHeader(key, value) + } + } + } + + if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { + return err + } + if err := args.Write(ctx, oprot); err != nil { + return err + } + if err := oprot.WriteMessageEnd(ctx); err != nil { + return err + } + return oprot.Flush(ctx) +} + +func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { + rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) + if err != nil { + return err + } + + if method != rMethod { + return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) + } else if seqId != rSeqId { + return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) + } else if rTypeId == EXCEPTION { + var exception tApplicationException + if err := exception.Read(ctx, iprot); err != nil { + return err + } + + if err := iprot.ReadMessageEnd(ctx); err != nil { + return err + } + + return &exception + } else if rTypeId != REPLY { + return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) + } + + if err := result.Read(ctx, iprot); err != nil { + return err + } + + return iprot.ReadMessageEnd(ctx) +} + +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + p.seqId++ + seqId := p.seqId + + if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { + return ResponseMeta{}, err + } + + // method is oneway + if result == nil { + return ResponseMeta{}, nil + } + + err := p.Recv(ctx, p.iprot, seqId, method, result) + var headers THeaderMap + if hp, ok := p.iprot.(*THeaderProtocol); ok { + headers = hp.transport.readHeaders + } + return ResponseMeta{ + Headers: headers, + }, err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go new file mode 100644 index 00000000..18915fee --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go @@ -0,0 +1,866 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C + COMPACT_UUID = 0x0D +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + UUID: COMPACT_UUID, + } +} + +type TCompactProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTCompactProtocolFactoryConf instead. +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return NewTCompactProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { + return &TCompactProtocolFactory{ + cfg: conf, + } +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocolConf(trans, p.cfg) +} + +func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + cfg *TConfiguration + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. + lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. + booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. + boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Deprecated: Use NewTCompactProtocolConf instead. +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + return NewTCompactProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { + PropagateTConfiguration(trans, conf) + p := &TCompactProtocol{ + origTransport: trans, + cfg: conf, + } + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. 
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(ctx, name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. +func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. 
+ var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(ctx, id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. +func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. 
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. +func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { + _, e := p.writeVarint32(int32(len(value))) + if e != nil { + return NewTProtocolException(e) + } + if len(value) == 0 { + return nil + } + _, e = p.trans.WriteString(value) + return e +} + +// Write a byte array, using a varint for the size. +func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { + _, e := p.writeVarint32(int32(len(bin))) + if e != nil { + return NewTProtocolException(e) + } + if len(bin) > 0 { + _, e = p.trans.Write(bin) + return NewTProtocolException(e) + } + return nil +} + +// Write a Tuuid to the wire as 16 bytes. +func (p *TCompactProtocol) WriteUUID(ctx context.Context, value Tuuid) error { + _, err := p.trans.Write(value[:]) + return NewTProtocolException(err) +} + +// +// Reading methods. +// + +// Read a message header. +func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + var protocolId byte + + _, deadlineSet := ctx.Deadline() + for { + protocolId, err = p.readByteDirect() + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // keep retrying I/O timeout errors since we still have + // time left + continue + } + // For anything else, don't retry + break + } + if err != nil { + return + } + + if protocolId != COMPACT_PROTOCOL_ID { + e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) + return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) + } + + versionAndType, err := p.readByteDirect() + if err != nil { + return + } + + version := versionAndType & COMPACT_VERSION_MASK + typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) + if version != COMPACT_VERSION { + e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) + err = NewTProtocolExceptionWithType(BAD_VERSION, e) + return + } + seqId, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + name, err = p.ReadString(ctx) + return +} + +func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } + +// Read a struct begin. There's nothing on the wire for this, but it is our +// opportunity to push a new struct begin marker onto the field stack. +func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return +} + +// Doesn't actually consume any wire data, just removes the last field for +// this struct from the field stack. +func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { + // consume the last field we read off the wire. + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +// Read a field header off the wire. 
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + t, err := p.readByteDirect() + if err != nil { + return + } + + // if it's a stop, then we can return immediately, as the struct is over. + if (t & 0x0f) == STOP { + return "", STOP, 0, nil + } + + // mask off the 4 MSB of the type header. it could contain a field id delta. + modifier := int16((t & 0xf0) >> 4) + if modifier == 0 { + // not a delta. look ahead for the zigzag varint field id. + id, err = p.ReadI16(ctx) + if err != nil { + return + } + } else { + // has a delta. add the delta to the last read field id. + id = int16(p.lastFieldId) + modifier + } + typeId, e := p.getTType(tCompactType(t & 0x0f)) + if e != nil { + err = NewTProtocolException(e) + return + } + + // if this happens to be a boolean field, the value is encoded in the type + if p.isBoolType(t) { + // save the boolean value in a special instance variable. + p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) + p.boolValueIsNotNull = true + } + + // push the new field onto the field stack so we can keep the deltas going. + p.lastFieldId = int(id) + return +} + +func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } + +// Read a map header off the wire. If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + err = checkSizeForProtocol(size32, p.cfg) + if err != nil { + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + size = int(size2) + } + err = checkSizeForProtocol(int32(size), p.cfg) + if err != nil { + return + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.ReadListBegin(ctx) +} + +func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } + +// Read a boolean off the wire. 
If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. +func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { + v, err := p.ReadI32(ctx) + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. +func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return "", nil + } + if length < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:length] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + buf, e := safeReadBytes(length, p.trans) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. +func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return []byte{}, nil + } + + buf, e := safeReadBytes(length, p.trans) + return buf, NewTProtocolException(e) +} + +// Read fixed 16 bytes as UUID. +func (p *TCompactProtocol) ReadUUID(ctx context.Context) (value Tuuid, err error) { + buf := p.buffer[0:16] + _, e := io.ReadFull(p.trans, buf) + if e == nil { + copy(value[:], buf) + } + return value, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. 
Lists and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+	if size <= 14 {
+		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+	}
+	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+	if err != nil {
+		return 0, err
+	}
+	m, err := p.writeVarint32(int32(size))
+	return 1 + m, err
+}
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+	i32buf := p.buffer[0:5]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			i32buf[idx] = byte(n)
+			idx++
+			// p.writeByteDirect(byte(n));
+			break
+			// return;
+		} else {
+			i32buf[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			// p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
+			u := uint32(n)
+			n = int32(u >> 7)
+		}
+	}
+	return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+	varint64out := p.buffer[0:10]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			varint64out[idx] = byte(n)
+			idx++
+			break
+		} else {
+			varint64out[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint64(n)
+			n = int64(u >> 7)
+		}
+	}
+	return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+	return (l << 1) ^ (l >> 63)
+}
+
+// Convert n into a zigzag int. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+	return (n << 1) ^ (n >> 31)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+	return p.trans.WriteByte(b)
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+	// if the wire contains the right stuff, this will just truncate the i64 we
+	// read and get us the right sign.
+	v, err := p.readVarint64()
+	return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+	shift := uint(0)
+	result := int64(0)
+	for {
+		b, err := p.readByteDirect()
+		if err != nil {
+			return 0, err
+		}
+		result |= int64(b&0x7f) << shift
+		if (b & 0x80) != 0x80 {
+			break
+		}
+		shift += 7
+	}
+	return result, nil
+}
+
+// Read a raw byte, unlike ReadByte which reads a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+	return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+	u := uint32(n)
+	return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { + u := uint64(n) + return int64(u>>1) ^ -(n & 1) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) +} + +// +// type testing and converting +// + +func (p *TCompactProtocol) isBoolType(b byte) bool { + return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE +} + +// Given a tCompactType constant, convert it to its corresponding +// TType value. +func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { + switch byte(t) & 0x0f { + case STOP: + return STOP, nil + case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: + return BOOL, nil + case COMPACT_BYTE: + return BYTE, nil + case COMPACT_I16: + return I16, nil + case COMPACT_I32: + return I32, nil + case COMPACT_I64: + return I64, nil + case COMPACT_DOUBLE: + return DOUBLE, nil + case COMPACT_BINARY: + return STRING, nil + case COMPACT_LIST: + return LIST, nil + case COMPACT_SET: + return SET, nil + case COMPACT_MAP: + return MAP, nil + case COMPACT_STRUCT: + return STRUCT, nil + case COMPACT_UUID: + return UUID, nil + } + return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) +} + +// Given a TType value, find the appropriate TCompactProtocol.Types constant. +func (p *TCompactProtocol) getCompactType(t TType) tCompactType { + return ttypeToCompactType[t] +} + +func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) + _ TConfigurationSetter = (*TCompactProtocol)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go new file mode 100644 index 00000000..a9565d39 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go @@ -0,0 +1,392 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "fmt" + "time" +) + +// Default TConfiguration values. +const ( + DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 + DEFAULT_MAX_FRAME_SIZE = 16384000 + + DEFAULT_TBINARY_STRICT_READ = false + DEFAULT_TBINARY_STRICT_WRITE = true + + DEFAULT_CONNECT_TIMEOUT = 0 + DEFAULT_SOCKET_TIMEOUT = 0 +) + +// TConfiguration defines some configurations shared between TTransport, +// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. 
+// +// When constructing TConfiguration, you only need to specify the non-default +// fields. All zero values have sane default values. +// +// Not all configurations defined are applicable to all implementations. +// Implementations are free to ignore the configurations not applicable to them. +// +// All functions attached to this type are nil-safe. +// +// See [1] for spec. +// +// NOTE: When using TConfiguration, fill in all the configurations you want to +// set across the stack, not only the ones you want to set in the immediate +// TTransport/TProtocol. +// +// For example, say you want to migrate this old code into using TConfiguration: +// +// socket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second) +// transFactory := thrift.NewTFramedTransportFactoryMaxLength( +// thrift.NewTTransportFactory(), +// 1024 * 1024 * 256, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) +// +// This is the wrong way to do it because in the end the TConfiguration used by +// socket and transFactory will be overwritten by the one used by protoFactory +// because of TConfiguration propagation: +// +// // bad example, DO NOT USE +// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// }) +// transFactory := thrift.NewTFramedTransportFactoryConf( +// thrift.NewTTransportFactory(), +// &thrift.TConfiguration{ +// MaxFrameSize: 1024 * 1024 * 256, +// }, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// }) +// +// This is the correct way to do it: +// +// conf := &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// +// MaxFrameSize: 1024 * 1024 * 256, +// +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// } +// socket := thrift.NewTSocketConf("host:port", conf) +// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) +// +// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md +type TConfiguration struct { + // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. + MaxMessageSize int32 + + // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. + // + // Also if MaxMessageSize < MaxFrameSize, + // MaxMessageSize will be used instead. + MaxFrameSize int32 + + // Connect and socket timeouts to be used by TSocket and TSSLSocket. + // + // 0 means no timeout. + // + // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be + // used. + ConnectTimeout time.Duration + SocketTimeout time.Duration + + // TLS config to be used by TSSLSocket. + TLSConfig *tls.Config + + // Strict read/write configurations for TBinaryProtocol. + // + // BoolPtr helper function is available to use literal values. + TBinaryStrictRead *bool + TBinaryStrictWrite *bool + + // The wrapped protocol id to be used in THeader transport/protocol. + // + // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions + // are provided to help filling this value. + THeaderProtocolID *THeaderProtocolID + // The write transforms to be applied to THeaderTransport. + THeaderTransforms []THeaderTransformID + + // Used internally by deprecated constructors, to avoid overriding + // underlying TTransport/TProtocol's cfg by accidental propagations. 
+	//
+	// For external users this is always false.
+	noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+	if tc == nil || tc.MaxMessageSize <= 0 {
+		return DEFAULT_MAX_MESSAGE_SIZE
+	}
+	return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+	if tc == nil {
+		return DEFAULT_MAX_FRAME_SIZE
+	}
+	maxFrameSize := tc.MaxFrameSize
+	if maxFrameSize <= 0 {
+		maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+	}
+	if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+		return maxMessageSize
+	}
+	return maxFrameSize
+}
+
+// GetConnectTimeout returns the connect timeout that should be used by TSocket
+// and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+	if tc == nil || tc.ConnectTimeout < 0 {
+		return DEFAULT_CONNECT_TIMEOUT
+	}
+	return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by TSocket
+// and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+	if tc == nil || tc.SocketTimeout < 0 {
+		return DEFAULT_SOCKET_TIMEOUT
+	}
+	return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+	if tc == nil {
+		return nil
+	}
+	return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+	if tc == nil || tc.TBinaryStrictRead == nil {
+		return DEFAULT_TBINARY_STRICT_READ
+	}
+	return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+	if tc == nil || tc.TBinaryStrictWrite == nil {
+		return DEFAULT_TBINARY_STRICT_WRITE
+	}
+	return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (for servers, they always use the same one as the
+// client instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+	if tc == nil || tc.THeaderProtocolID == nil {
+		return THeaderProtocolDefault
+	}
+	protoID := *tc.THeaderProtocolID
+	if err := protoID.Validate(); err != nil {
+		return THeaderProtocolDefault
+	}
+	return protoID
+}
+
+// GetTHeaderTransforms returns the THeaderTransformIDs to be applied on
+// THeaderTransport writing.
+//
+// It's nil-safe. If tc is nil, an empty slice will be returned (meaning no
+// transforms to be applied).
+func (tc *TConfiguration) GetTHeaderTransforms() []THeaderTransformID {
+	if tc == nil {
+		return nil
+	}
+	return tc.THeaderTransforms
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+	err := id.Validate()
+	if err != nil {
+		id = THeaderProtocolDefault
+	}
+	return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+	ptr, err := THeaderProtocolIDPtr(id)
+	if err != nil {
+		panic(err)
+	}
+	return ptr
+}
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initialization.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the message currently being
+// processed, and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick succession due to the implementation of the
+// propagation. Implementations should make SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+	SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with everything at its default value, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { + if cfg == nil || cfg.noPropagation { + return + } + + if setter, ok := impl.(TConfigurationSetter); ok { + setter.SetTConfiguration(cfg) + } +} + +func checkSizeForProtocol(size int32, cfg *TConfiguration) error { + if size < 0 { + return NewTProtocolExceptionWithType( + NEGATIVE_SIZE, + fmt.Errorf("negative size: %d", size), + ) + } + if size > cfg.GetMaxMessageSize() { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + fmt.Errorf("size exceeded max allowed: %d", size), + ) + } + return nil +} + +type tTransportFactoryConf struct { + delegate TTransportFactory + cfg *TConfiguration +} + +func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { + trans, err := f.delegate.GetTransport(orig) + if err == nil { + PropagateTConfiguration(orig, f.cfg) + PropagateTConfiguration(trans, f.cfg) + } + return trans, err +} + +func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TTransportFactoryConf wraps a TTransportFactory to propagate +// TConfiguration on the factory's GetTransport calls. +func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { + return &tTransportFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +type tProtocolFactoryConf struct { + delegate TProtocolFactory + cfg *TConfiguration +} + +func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { + proto := f.delegate.GetProtocol(trans) + PropagateTConfiguration(trans, f.cfg) + PropagateTConfiguration(proto, f.cfg) + return proto +} + +func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TProtocolFactoryConf wraps a TProtocolFactory to propagate +// TConfiguration on the factory's GetProtocol calls. +func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { + return &tProtocolFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +var ( + _ TConfigurationSetter = (*tTransportFactoryConf)(nil) + _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/context.go b/vendor/github.com/apache/thrift/lib/go/thrift/context.go new file mode 100644 index 00000000..d15c1bcf --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/context.go @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package thrift
+
+import "context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
new file mode 100644
index 00000000..d730411b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
@@ -0,0 +1,692 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"log/slog"
+)
+
+type TDebugProtocol struct {
+	// Required. The actual TProtocol to do the read/write.
+	Delegate TProtocol
+
+	// Optional. The logger and prefix to log all the args/return values
+	// from Delegate TProtocol calls.
+	//
+	// If Logger is nil, StdLogger using the stdlib log package with os.Stderr
+	// will be used. To disable logging, set Logger to NopLogger
+	// explicitly instead of leaving it as nil/unset.
+	//
+	// Deprecated: TDebugProtocol always uses slog at debug level now.
+	// This field will be removed in a future version.
+	Logger Logger
+
+	LogPrefix string
+
+	// Optional. A TProtocol to duplicate everything read/written from Delegate.
+	//
+	// A typical use case of this is to use TSimpleJSONProtocol wrapping
+	// TMemoryBuffer in a middleware to JSON-log requests/responses.
+	//
+	// This feature is not available from TDebugProtocolFactory. In order to
+	// use it you have to construct TDebugProtocol directly, or set the
+	// DuplicateTo field after getting a TDebugProtocol from the factory.
+	//
+	// Deprecated: Please use TDuplicateToProtocol instead.
+	DuplicateTo TProtocol
+}
+
+type TDebugProtocolFactory struct {
+	Underlying TProtocolFactory
+	LogPrefix  string
+	Logger     Logger
+}
+
+// NewTDebugProtocolFactory creates a TDebugProtocolFactory.
+//
+// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct
+// itself instead. This version will use the default logger from the standard
+// library.
+func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
+	return &TDebugProtocolFactory{
+		Underlying: underlying,
+		LogPrefix:  logPrefix,
+		Logger:     StdLogger(nil),
+	}
+}
+
+// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory.
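+//
+// Typical wiring (editor's sketch): wrap an existing protocol factory so all
+// delegate calls are logged at debug level with the given prefix:
+//
+//	factory := NewTDebugProtocolFactoryWithLogger(
+//		NewTBinaryProtocolFactoryConf(nil), "client: ", StdLogger(nil))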
+func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: logger, + } +} + +func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return &TDebugProtocol{ + Delegate: t.Underlying.GetProtocol(trans), + LogPrefix: t.LogPrefix, + Logger: fallbackLogger(t.Logger), + } +} + +func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteMessageBegin", + "name", name, + "typeId", typeId, + "seqid", seqid, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return err +} +func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMessageEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteMessageEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { + err := tdp.Delegate.WriteStructBegin(ctx, name) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteStructBegin", + "name", name, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return err +} +func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { + err := tdp.Delegate.WriteStructEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteStructEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteFieldBegin", + "name", name, + "typeId", typeId, + "id", id, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { + err := tdp.Delegate.WriteFieldEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteFieldEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { + err := tdp.Delegate.WriteFieldStop(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteFieldStop", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldStop(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteMapBegin", + "keyType", keyType, + "valueType", valueType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMapEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteMapEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) 
error { + err := tdp.Delegate.WriteListBegin(ctx, elemType, size) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteListBegin", + "elemType", elemType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { + err := tdp.Delegate.WriteListEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteListEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteSetBegin", + "elemType", elemType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { + err := tdp.Delegate.WriteSetEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteSetEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { + err := tdp.Delegate.WriteBool(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteBool", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { + err := tdp.Delegate.WriteByte(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteByte", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { + err := tdp.Delegate.WriteI16(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteI16", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { + err := tdp.Delegate.WriteI32(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteI32", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { + err := tdp.Delegate.WriteI64(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteI64", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { + err := tdp.Delegate.WriteDouble(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteDouble", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { + err := tdp.Delegate.WriteString(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteString", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { + err := tdp.Delegate.WriteBinary(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteBinary", + 
"value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteUUID(ctx context.Context, value Tuuid) error { + err := tdp.Delegate.WriteUUID(ctx, value) + slog.DebugContext( + ctx, + tdp.LogPrefix+"WriteUUID", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteUUID(ctx, value) + } + return err +} + +func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadMessageBegin", + "name", name, + "typeId", typeId, + "seqid", seqid, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return +} +func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMessageEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadMessageEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + name, err = tdp.Delegate.ReadStructBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadStructBegin", + "name", name, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return +} +func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadStructEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadStructEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadFieldBegin", + "name", name, + "typeId", typeId, + "id", id, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return +} +func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadFieldEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadFieldEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadMapBegin", + "keyType", keyType, + "valueType", valueType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return +} +func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMapEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadMapEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadListBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadListBegin", + "elemType", elemType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) 
ReadListEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadListEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadListEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadSetBegin", + "elemType", elemType, + "size", size, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadSetEnd(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadSetEnd", + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { + value, err = tdp.Delegate.ReadBool(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadBool", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { + value, err = tdp.Delegate.ReadByte(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadByte", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { + value, err = tdp.Delegate.ReadI16(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadI16", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { + value, err = tdp.Delegate.ReadI32(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadI32", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { + value, err = tdp.Delegate.ReadI64(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadI64", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + value, err = tdp.Delegate.ReadDouble(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadDouble", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) { + value, err = tdp.Delegate.ReadString(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadString", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadBinary", + "value", value, + "err", err, + ) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadUUID(ctx context.Context) (value Tuuid, err error) { + value, err = tdp.Delegate.ReadUUID(ctx) + slog.DebugContext( + ctx, + tdp.LogPrefix+"ReadUUID", + "value", value, + "err", err, 
+	)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteUUID(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	err = tdp.Delegate.Skip(ctx, fieldType)
+	slog.DebugContext(
+		ctx,
+		tdp.LogPrefix+"Skip",
+		"fieldType", fieldType,
+		"err", err,
+	)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.Skip(ctx, fieldType)
+	}
+	return
+}
+func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
+	err = tdp.Delegate.Flush(ctx)
+	slog.DebugContext(
+		ctx,
+		tdp.LogPrefix+"Flush",
+		"err", err,
+	)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.Flush(ctx)
+	}
+	return
+}
+
+func (tdp *TDebugProtocol) Transport() TTransport {
+	return tdp.Delegate.Transport()
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(tdp.Delegate, conf)
+	PropagateTConfiguration(tdp.DuplicateTo, conf)
+}
+
+var _ TConfigurationSetter = (*TDebugProtocol)(nil)
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
new file mode 100644
index 00000000..0c68d6b5
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+type TDeserializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+func NewTDeserializer() *TDeserializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolTransport(transport)
+
+	return &TDeserializer{
+		Transport: transport,
+		Protocol:  protocol,
+	}
+}
+
+type reseter interface {
+	Reset()
+}
+
+func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) {
+	t.Transport.Reset()
+	if r, ok := t.Protocol.(reseter); ok {
+		r.Reset()
+	}
+
+	err = nil
+	if _, err = t.Transport.Write([]byte(s)); err != nil {
+		return
+	}
+	if err = msg.Read(ctx, t.Protocol); err != nil {
+		return
+	}
+	return
+}
+
+func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) {
+	t.Transport.Reset()
+	if r, ok := t.Protocol.(reseter); ok {
+		r.Reset()
+	}
+
+	err = nil
+	if _, err = t.Transport.Write(b); err != nil {
+		return
+	}
+	if err = msg.Read(ctx, t.Protocol); err != nil {
+		return
+	}
+	return
+}
+
+// TDeserializerPool is the thread-safe version of TDeserializer; it uses a
+// resource pool of TDeserializers under the hood.
+//
+// It must be initialized with either NewTDeserializerPool or
+// NewTDeserializerPoolSizeFactory.
+type TDeserializerPool struct {
+	pool *pool[TDeserializer]
+}
+
+// NewTDeserializerPool creates a new TDeserializerPool.
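+//
+// NewTDeserializer can be used as the arg here.
+//
+// A usage sketch (editor's illustration; MyStruct and payload are
+// placeholders):
+//
+//	pool := NewTDeserializerPool(NewTDeserializer)
+//	msg := &MyStruct{} // a thrift-compiler-generated TStruct
+//	err := pool.Read(context.Background(), msg, payload)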
+func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool {
+	return &TDeserializerPool{
+		pool: newPool(f, nil),
+	}
+}
+
+// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with
+// the given size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool {
+	return &TDeserializerPool{
+		pool: newPool(func() *TDeserializer {
+			transport := NewTMemoryBufferLen(size)
+			protocol := factory.GetProtocol(transport)
+
+			return &TDeserializer{
+				Transport: transport,
+				Protocol:  protocol,
+			}
+		}, nil),
+	}
+}
+
+func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error {
+	d := t.pool.get()
+	defer t.pool.put(&d)
+	return d.ReadString(ctx, msg, s)
+}
+
+func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error {
+	d := t.pool.get()
+	defer t.pool.put(&d)
+	return d.Read(ctx, msg, b)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/duplicate_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/duplicate_protocol.go
new file mode 100644
index 00000000..6413909d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/duplicate_protocol.go
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+type TDuplicateToProtocol struct {
+	// Required. The actual TProtocol to do the read/write.
+	Delegate TProtocol
+
+	// Required. A TProtocol to duplicate everything read/written from Delegate.
+	//
+	// A typical use case of this is to use TSimpleJSONProtocol wrapping
+	// TMemoryBuffer in a middleware to JSON-log requests/responses,
+	// or wrapping a TTransport that counts bytes written to get the payload
+	// sizes.
+	//
+	// DuplicateTo will be used as write-only. For read calls on
+	// TDuplicateToProtocol, the result read from Delegate will be written
+	// to DuplicateTo.
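+	//
+	// For example (editor's sketch; trans is a placeholder transport): to
+	// JSON-log everything that goes through this protocol into a buffer:
+	//
+	//	buf := NewTMemoryBuffer()
+	//	proto := &TDuplicateToProtocol{
+	//		Delegate:    NewTBinaryProtocolConf(trans, nil),
+	//		DuplicateTo: NewTSimpleJSONProtocolConf(buf, nil),
+	//	}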
+ DuplicateTo TProtocol +} + +func (tdtp *TDuplicateToProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := tdtp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) + tdtp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteMessageEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteMessageEnd(ctx) + tdtp.DuplicateTo.WriteMessageEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteStructBegin(ctx context.Context, name string) error { + err := tdtp.Delegate.WriteStructBegin(ctx, name) + tdtp.DuplicateTo.WriteStructBegin(ctx, name) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteStructEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteStructEnd(ctx) + tdtp.DuplicateTo.WriteStructEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + err := tdtp.Delegate.WriteFieldBegin(ctx, name, typeId, id) + tdtp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteFieldEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteFieldEnd(ctx) + tdtp.DuplicateTo.WriteFieldEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteFieldStop(ctx context.Context) error { + err := tdtp.Delegate.WriteFieldStop(ctx) + tdtp.DuplicateTo.WriteFieldStop(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + err := tdtp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) + tdtp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteMapEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteMapEnd(ctx) + tdtp.DuplicateTo.WriteMapEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + err := tdtp.Delegate.WriteListBegin(ctx, elemType, size) + tdtp.DuplicateTo.WriteListBegin(ctx, elemType, size) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteListEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteListEnd(ctx) + tdtp.DuplicateTo.WriteListEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + err := tdtp.Delegate.WriteSetBegin(ctx, elemType, size) + tdtp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteSetEnd(ctx context.Context) error { + err := tdtp.Delegate.WriteSetEnd(ctx) + tdtp.DuplicateTo.WriteSetEnd(ctx) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteBool(ctx context.Context, value bool) error { + err := tdtp.Delegate.WriteBool(ctx, value) + tdtp.DuplicateTo.WriteBool(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteByte(ctx context.Context, value int8) error { + err := tdtp.Delegate.WriteByte(ctx, value) + tdtp.DuplicateTo.WriteByte(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteI16(ctx context.Context, value int16) error { + err := tdtp.Delegate.WriteI16(ctx, value) + tdtp.DuplicateTo.WriteI16(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteI32(ctx context.Context, value int32) error { + err := tdtp.Delegate.WriteI32(ctx, value) + tdtp.DuplicateTo.WriteI32(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteI64(ctx 
context.Context, value int64) error { + err := tdtp.Delegate.WriteI64(ctx, value) + tdtp.DuplicateTo.WriteI64(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteDouble(ctx context.Context, value float64) error { + err := tdtp.Delegate.WriteDouble(ctx, value) + tdtp.DuplicateTo.WriteDouble(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteString(ctx context.Context, value string) error { + err := tdtp.Delegate.WriteString(ctx, value) + tdtp.DuplicateTo.WriteString(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteBinary(ctx context.Context, value []byte) error { + err := tdtp.Delegate.WriteBinary(ctx, value) + tdtp.DuplicateTo.WriteBinary(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) WriteUUID(ctx context.Context, value Tuuid) error { + err := tdtp.Delegate.WriteUUID(ctx, value) + tdtp.DuplicateTo.WriteUUID(ctx, value) + return err +} + +func (tdtp *TDuplicateToProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdtp.Delegate.ReadMessageBegin(ctx) + tdtp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + return +} + +func (tdtp *TDuplicateToProtocol) ReadMessageEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadMessageEnd(ctx) + tdtp.DuplicateTo.WriteMessageEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + name, err = tdtp.Delegate.ReadStructBegin(ctx) + tdtp.DuplicateTo.WriteStructBegin(ctx, name) + return +} + +func (tdtp *TDuplicateToProtocol) ReadStructEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadStructEnd(ctx) + tdtp.DuplicateTo.WriteStructEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdtp.Delegate.ReadFieldBegin(ctx) + tdtp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + return +} + +func (tdtp *TDuplicateToProtocol) ReadFieldEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadFieldEnd(ctx) + tdtp.DuplicateTo.WriteFieldEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdtp.Delegate.ReadMapBegin(ctx) + tdtp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + return +} + +func (tdtp *TDuplicateToProtocol) ReadMapEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadMapEnd(ctx) + tdtp.DuplicateTo.WriteMapEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdtp.Delegate.ReadListBegin(ctx) + tdtp.DuplicateTo.WriteListBegin(ctx, elemType, size) + return +} + +func (tdtp *TDuplicateToProtocol) ReadListEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadListEnd(ctx) + tdtp.DuplicateTo.WriteListEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdtp.Delegate.ReadSetBegin(ctx) + tdtp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + return +} + +func (tdtp *TDuplicateToProtocol) ReadSetEnd(ctx context.Context) (err error) { + err = tdtp.Delegate.ReadSetEnd(ctx) + tdtp.DuplicateTo.WriteSetEnd(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) ReadBool(ctx context.Context) (value bool, err error) { + value, err = 
tdtp.Delegate.ReadBool(ctx) + tdtp.DuplicateTo.WriteBool(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadByte(ctx context.Context) (value int8, err error) { + value, err = tdtp.Delegate.ReadByte(ctx) + tdtp.DuplicateTo.WriteByte(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadI16(ctx context.Context) (value int16, err error) { + value, err = tdtp.Delegate.ReadI16(ctx) + tdtp.DuplicateTo.WriteI16(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadI32(ctx context.Context) (value int32, err error) { + value, err = tdtp.Delegate.ReadI32(ctx) + tdtp.DuplicateTo.WriteI32(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadI64(ctx context.Context) (value int64, err error) { + value, err = tdtp.Delegate.ReadI64(ctx) + tdtp.DuplicateTo.WriteI64(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + value, err = tdtp.Delegate.ReadDouble(ctx) + tdtp.DuplicateTo.WriteDouble(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadString(ctx context.Context) (value string, err error) { + value, err = tdtp.Delegate.ReadString(ctx) + tdtp.DuplicateTo.WriteString(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + value, err = tdtp.Delegate.ReadBinary(ctx) + tdtp.DuplicateTo.WriteBinary(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) ReadUUID(ctx context.Context) (value Tuuid, err error) { + value, err = tdtp.Delegate.ReadUUID(ctx) + tdtp.DuplicateTo.WriteUUID(ctx, value) + return +} + +func (tdtp *TDuplicateToProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + err = tdtp.Delegate.Skip(ctx, fieldType) + tdtp.DuplicateTo.Skip(ctx, fieldType) + return +} + +func (tdtp *TDuplicateToProtocol) Flush(ctx context.Context) (err error) { + err = tdtp.Delegate.Flush(ctx) + tdtp.DuplicateTo.Flush(ctx) + return +} + +func (tdtp *TDuplicateToProtocol) Transport() TTransport { + return tdtp.Delegate.Transport() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (tdtp *TDuplicateToProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(tdtp.Delegate, conf) + PropagateTConfiguration(tdtp.DuplicateTo, conf) +} + +var _ TConfigurationSetter = (*TDuplicateToProtocol)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go new file mode 100644 index 00000000..5b4cad96 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package thrift
+
+import (
+	"errors"
+	"reflect"
+)
+
+// TException is the generic Thrift exception interface.
+type TException interface {
+	error
+
+	TExceptionType() TExceptionType
+}
+
+// PrependError prepends additional information to an error without losing the
+// Thrift exception interface.
+func PrependError(prepend string, err error) error {
+	msg := prepend + err.Error()
+
+	var te TException
+	if errors.As(err, &te) {
+		switch te.TExceptionType() {
+		case TExceptionTypeTransport:
+			if t, ok := err.(TTransportException); ok {
+				return prependTTransportException(prepend, t)
+			}
+		case TExceptionTypeProtocol:
+			if t, ok := err.(TProtocolException); ok {
+				return prependTProtocolException(prepend, t)
+			}
+		case TExceptionTypeApplication:
+			var t TApplicationException
+			if errors.As(err, &t) {
+				return NewTApplicationException(t.TypeId(), msg)
+			}
+		}
+
+		return wrappedTException{
+			err:            err,
+			msg:            msg,
+			tExceptionType: te.TExceptionType(),
+		}
+	}
+
+	return errors.New(msg)
+}
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+	TExceptionTypeUnknown     TExceptionType = iota
+	TExceptionTypeCompiled                   // TExceptions defined in thrift files and generated by thrift compiler
+	TExceptionTypeApplication                // TApplicationExceptions
+	TExceptionTypeProtocol                   // TProtocolExceptions
+	TExceptionTypeTransport                  // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already TException, it's returned as-is.
+// Otherwise it will be wrapped into TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+	if err == nil {
+		return nil
+	}
+
+	if te, ok := err.(TException); ok {
+		return te
+	}
+
+	return wrappedTException{
+		err:            err,
+		msg:            err.Error(),
+		tExceptionType: TExceptionTypeUnknown,
+	}
+}
+
+type wrappedTException struct {
+	err            error
+	msg            string
+	tExceptionType TExceptionType
+}
+
+func (w wrappedTException) Error() string {
+	return w.msg
+}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+	return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+	return w.err
+}
+
+var _ TException = wrappedTException{}
+
+// ExtractExceptionFromResult extracts exceptions defined in thrift IDL from
+// result TStruct used in TClient.Call.
+//
+// For an endpoint defined in thrift IDL like this:
+//
+//	service MyService {
+//	  FooResponse foo(1: FooRequest request) throws (
+//	    1: Exception1 error1,
+//	    2: Exception2 error2,
+//	  )
+//	}
+//
+// The Go code generated by the thrift compiler for the result TStruct would
+// look like:
+//
+//	type MyServiceFooResult struct {
+//	  Success *FooResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+//	  Error1  *Exception1  `thrift:"error1,1" db:"error1" json:"error1,omitempty"`
+//	  Error2  *Exception2  `thrift:"error2,2" db:"error2" json:"error2,omitempty"`
+//	}
+//
+// And this function extracts the first non-nil exception out of
+// *MyServiceFooResult.
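+//
+// A call-site sketch (editor's illustration, reusing the hypothetical
+// MyServiceFooResult above):
+//
+//	result := &MyServiceFooResult{} // filled by TClient.Call
+//	if err := ExtractExceptionFromResult(result); err != nil {
+//		// err is the first non-nil Error1/Error2, as a TException.
+//	}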
+func ExtractExceptionFromResult(result TStruct) error { + v := reflect.Indirect(reflect.ValueOf(result)) + if v.Kind() != reflect.Struct { + return nil + } + typ := v.Type() + for i := range v.NumField() { + if typ.Field(i).Name == "Success" { + continue + } + field := v.Field(i) + if field.IsZero() { + continue + } + tExc, ok := field.Interface().(TException) + if ok && tExc != nil && tExc.TExceptionType() == TExceptionTypeCompiled { + return tExc + } + } + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go new file mode 100644 index 00000000..e3c323af --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" +) + +// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + + cfg *TConfiguration + + writeBuf *bytes.Buffer + + reader *bufio.Reader + readBuf *bytes.Buffer + + buffer [4]byte +} + +type tFramedTransportFactory struct { + factory TTransportFactory + cfg *TConfiguration +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. +func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + PropagateTConfiguration(factory, conf) + return &tFramedTransportFactory{ + factory: factory, + cfg: conf, + } +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + PropagateTConfiguration(base, p.cfg) + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportConf(tt, p.cfg), nil +} + +func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.factory, cfg) + p.cfg = cfg +} + +// Deprecated: Use NewTFramedTransportConf instead. 
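+//
+// For example (editor's sketch), instead of NewTFramedTransport(trans), use:
+//
+//	NewTFramedTransportConf(trans, &TConfiguration{
+//		MaxFrameSize: DEFAULT_MAX_LENGTH,
+//	})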
+func NewTFramedTransport(transport TTransport) *TFramedTransport {
+	return NewTFramedTransportConf(transport, &TConfiguration{
+		MaxFrameSize: DEFAULT_MAX_LENGTH,
+
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTFramedTransportConf instead.
+func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
+	return NewTFramedTransportConf(transport, &TConfiguration{
+		MaxFrameSize: int32(maxLength),
+
+		noPropagation: true,
+	})
+}
+
+func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport {
+	PropagateTConfiguration(transport, conf)
+	return &TFramedTransport{
+		transport: transport,
+		reader:    bufio.NewReader(transport),
+		cfg:       conf,
+	}
+}
+
+func (p *TFramedTransport) Open() error {
+	return p.transport.Open()
+}
+
+func (p *TFramedTransport) IsOpen() bool {
+	return p.transport.IsOpen()
+}
+
+func (p *TFramedTransport) Close() error {
+	return p.transport.Close()
+}
+
+func (p *TFramedTransport) Read(buf []byte) (read int, err error) {
+	defer func() {
+		// Make sure we return the read buffer back to the pool
+		// after we finish reading from it.
+		if p.readBuf != nil && p.readBuf.Len() == 0 {
+			bufPool.put(&p.readBuf)
+		}
+	}()
+
+	if p.readBuf != nil {
+
+		read, err = p.readBuf.Read(buf)
+		if err != io.EOF {
+			return
+		}
+
+		// For bytes.Buffer.Read, EOF would only happen when read is zero,
+		// but still, do a sanity check,
+		// in case that behavior is changed in a future version of the go
+		// stdlib. When that happens, just return nil error,
+		// and let the caller call Read again to read the next frame.
+		if read > 0 {
+			return read, nil
+		}
+	}
+
+	// Reaching here means that the last Read finished the last frame,
+	// so we need to read the next frame into readBuf now.
+ if err = p.readFrame(); err != nil { + return read, err + } + newRead, err := p.Read(buf[read:]) + return read + newRead, err +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + buf := p.buffer[:1] + _, err = p.Read(buf) + if err != nil { + return + } + c = buf[0] + return +} + +func (p *TFramedTransport) ensureWriteBufferBeforeWrite() { + if p.writeBuf == nil { + p.writeBuf = bufPool.get() + } +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + p.ensureWriteBufferBeforeWrite() + n, err := p.writeBuf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + p.ensureWriteBufferBeforeWrite() + return p.writeBuf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + p.ensureWriteBufferBeforeWrite() + return p.writeBuf.WriteString(s) +} + +func (p *TFramedTransport) Flush(ctx context.Context) error { + defer bufPool.put(&p.writeBuf) + size := p.writeBuf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if size > 0 { + if _, err := io.Copy(p.transport, p.writeBuf); err != nil { + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush(ctx) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrame() error { + if p.readBuf != nil { + bufPool.put(&p.readBuf) + } + p.readBuf = bufPool.get() + + buf := p.buffer[:4] + if _, err := io.ReadFull(p.reader, buf); err != nil { + return err + } + size := binary.BigEndian.Uint32(buf) + if size > uint32(p.cfg.GetMaxFrameSize()) { + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + _, err := io.CopyN(p.readBuf, p.reader, int64(size)) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + if p.readBuf == nil { + return 0 + } + return uint64(p.readBuf.Len()) +} + +// SetTConfiguration implements TConfigurationSetter. +func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tFramedTransportFactory)(nil) + _ TConfigurationSetter = (*TFramedTransport)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go new file mode 100644 index 00000000..ac9bd488 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type (
+	headerKey     string
+	headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+	headerKeyListRead headerKeyList = iota
+	headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		value,
+	)
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		nil,
+	)
+}
+
+// GetHeader returns the value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+	if v := ctx.Value(headerKey(key)); v != nil {
+		value, ok = v.(string)
+	}
+	return
+}
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListRead,
+		keys,
+	)
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListRead); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListWrite,
+		keys,
+	)
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListWrite); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// AddReadTHeaderToContext adds all THeader headers into the context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+	keys := make([]string, 0, len(headers))
+	for key, value := range headers {
+		ctx = SetHeader(ctx, key, value)
+		keys = append(keys, key)
+	}
+	return SetReadHeaderList(ctx, keys)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go
new file mode 100644
index 00000000..bec84b85
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go
@@ -0,0 +1,364 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+	transport *THeaderTransport
+
+	// Will be initialized on first read/write.
+	protocol TProtocol
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with given TConfiguration.
+//
+// The passed in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles frame and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or
+// TSSLSocket), instead of a rich transport like TZlibTransport or
+// TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+	t := NewTHeaderTransportConf(trans, cfg)
+	p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+	PropagateTConfiguration(p, cfg)
+	return &THeaderProtocol{
+		transport: t,
+		protocol:  p,
+		cfg:       cfg,
+	}
+}
+
+type tHeaderProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+	f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+	return NewTHeaderProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+	return tHeaderProtocolFactory{
+		cfg: conf,
+	}
+}
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+	return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+	return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+	p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+	p.transport.ClearWriteHeaders()
+}
+
+// AddTransform adds a transform for writing.
+//
+// Deprecated: This only applies to the next message written, and the next read
+// message will cause write transforms to be reset from what's configured in
+// TConfiguration. For sticky transforms, use TConfiguration.THeaderTransforms
+// instead.
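+//
+// For example (editor's sketch), to make the zlib transform sticky instead:
+//
+//	NewTHeaderProtocolConf(trans, &TConfiguration{
+//		THeaderTransforms: []THeaderTransformID{TransformZlib},
+//	})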
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { + return p.transport.AddTransform(transform) +} + +func (p *THeaderProtocol) Flush(ctx context.Context) error { + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { + newProto, err := p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + return err + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + p.transport.SequenceID = seqID + return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) +} + +func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { + if err := p.protocol.WriteMessageEnd(ctx); err != nil { + return err + } + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { + return p.protocol.WriteStructBegin(ctx, name) +} + +func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { + return p.protocol.WriteStructEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { + return p.protocol.WriteFieldBegin(ctx, name, typeID, id) +} + +func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { + return p.protocol.WriteFieldEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { + return p.protocol.WriteFieldStop(ctx) +} + +func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) +} + +func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { + return p.protocol.WriteMapEnd(ctx) +} + +func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteListBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { + return p.protocol.WriteListEnd(ctx) +} + +func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteSetBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { + return p.protocol.WriteSetEnd(ctx) +} + +func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { + return p.protocol.WriteBool(ctx, value) +} + +func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { + return p.protocol.WriteByte(ctx, value) +} + +func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { + return p.protocol.WriteI16(ctx, value) +} + +func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { + return p.protocol.WriteI32(ctx, value) +} + +func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { + return p.protocol.WriteI64(ctx, value) +} + +func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.protocol.WriteDouble(ctx, value) +} + +func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { + return p.protocol.WriteString(ctx, value) +} + +func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { + return p.protocol.WriteBinary(ctx, value) +} + +func (p *THeaderProtocol) WriteUUID(ctx context.Context, value Tuuid) error { + return p.protocol.WriteUUID(ctx, value) +} + +// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { + return p.transport.ReadFrame(ctx) +} + +func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { + if err = p.transport.ReadFrame(ctx); err != nil { + return + } + + var newProto TProtocol + newProto, err = p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + var tAppExc TApplicationException + if !errors.As(err, &tAppExc) { + return + } + if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { + return + } + if e := tAppExc.Write(ctx, p.protocol); e != nil { + return + } + if e := p.protocol.WriteMessageEnd(ctx); e != nil { + return + } + if e := p.transport.Flush(ctx); e != nil { + return + } + return + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + + return p.protocol.ReadMessageBegin(ctx) +} + +func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { + return p.protocol.ReadMessageEnd(ctx) +} + +func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return p.protocol.ReadStructBegin(ctx) +} + +func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { + return p.protocol.ReadStructEnd(ctx) +} + +func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { + return p.protocol.ReadFieldBegin(ctx) +} + +func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { + return p.protocol.ReadFieldEnd(ctx) +} + +func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + return p.protocol.ReadMapBegin(ctx) +} + +func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { + return p.protocol.ReadMapEnd(ctx) +} + +func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadListBegin(ctx) +} + +func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { + return p.protocol.ReadListEnd(ctx) +} + +func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadSetBegin(ctx) +} + +func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { + return p.protocol.ReadSetEnd(ctx) +} + +func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { + return p.protocol.ReadBool(ctx) +} + +func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { + return p.protocol.ReadByte(ctx) +} + +func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { + return p.protocol.ReadI16(ctx) +} + +func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { + return p.protocol.ReadI32(ctx) +} + +func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { + return p.protocol.ReadI64(ctx) +} + +func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + return p.protocol.ReadDouble(ctx) +} + +func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { + return p.protocol.ReadString(ctx) +} + +func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + return p.protocol.ReadBinary(ctx) +} + +func (p *THeaderProtocol) ReadUUID(ctx context.Context) (value Tuuid, err error) { + return p.protocol.ReadUUID(ctx) +} + +func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { + return p.protocol.Skip(ctx, fieldType) +} + 
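+// Example client-side use of THeaderProtocol (an illustrative sketch, not a
+// definitive recipe; it assumes an opened transport trans, e.g. a TSocket,
+// and a ctx context.Context created elsewhere, and the method name and
+// header key/value below are arbitrary):
+//
+//	proto := NewTHeaderProtocolConf(trans, &TConfiguration{})
+//	proto.SetWriteHeader("client_id", "my-client")
+//	if err := proto.WriteMessageBegin(ctx, "ping", CALL, 1); err != nil {
+//		// handle error
+//	}
+//	// ... write the call arguments; WriteMessageEnd flushes the frame.
+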
+// SetTConfiguration implements TConfigurationSetter. +func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + PropagateTConfiguration(p.protocol, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) + _ TConfigurationSetter = (*THeaderProtocol)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go new file mode 100644 index 00000000..d6d64160 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go @@ -0,0 +1,872 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "compress/zlib" + "context" + "encoding/binary" + "errors" + "fmt" + "io" +) + +// Size in bytes for 32-bit ints. +const size32 = 4 + +type headerMeta struct { + MagicFlags uint32 + SequenceID int32 + HeaderLength uint16 +} + +const headerMetaSize = 10 + +type clientType int + +const ( + clientUnknown clientType = iota + clientHeaders + clientFramedBinary + clientUnframedBinary + clientFramedCompact + clientUnframedCompact +) + +// Constants defined in THeader format: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +const ( + THeaderHeaderMagic uint32 = 0x0fff0000 + THeaderHeaderMask uint32 = 0xffff0000 + THeaderFlagsMask uint32 = 0x0000ffff + THeaderMaxFrameSize uint32 = 0x3fffffff +) + +// THeaderMap is the type of the header map in THeader transport. +type THeaderMap map[string]string + +// THeaderProtocolID is the wrapped protocol id used in THeader. +type THeaderProtocolID int32 + +// Supported THeaderProtocolID values. +const ( + THeaderProtocolBinary THeaderProtocolID = 0x00 + THeaderProtocolCompact THeaderProtocolID = 0x02 + THeaderProtocolDefault = THeaderProtocolBinary +) + +// Declared globally to avoid repetitive allocations, not really used. +var globalMemoryBuffer = NewTMemoryBuffer() + +// Validate checks whether the THeaderProtocolID is a valid/supported one. +func (id THeaderProtocolID) Validate() error { + _, err := id.GetProtocol(globalMemoryBuffer) + return err +} + +// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. +func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { + switch id { + default: + return nil, NewTApplicationException( + INVALID_PROTOCOL, + fmt.Sprintf("THeader protocol id %d not supported", id), + ) + case THeaderProtocolBinary: + return NewTBinaryProtocolTransport(trans), nil + case THeaderProtocolCompact: + return NewTCompactProtocol(trans), nil + } +} + +// THeaderTransformID defines the numeric id of the transform used. +type THeaderTransformID int32 + +// THeaderTransformID values. 
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+	TransformNone THeaderTransformID = iota // 0, no special handling
+	TransformZlib                           // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+	TransformNone: true,
+	TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+	io.Reader
+
+	closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with expected
+// closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+//	&TransformReader{Reader: baseReader}
+//
+// directly would be sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+	return &TransformReader{
+		Reader:  baseReader,
+		closers: make([]io.Closer, 0, capacity),
+	}
+}
+
+// Close calls the underlying closers in appropriate order,
+// stopping at and returning the first error encountered.
+func (tr *TransformReader) Close() error {
+	// Call closers in reversed order
+	for i := len(tr.closers) - 1; i >= 0; i-- {
+		if err := tr.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+//
+// Deprecated: This only applies to the next message written, and the next read
+// message will cause write transforms to be reset from what's configured in
+// TConfiguration. For sticky transforms, use TConfiguration.THeaderTransforms
+// instead.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		readCloser, err := zlib.NewReader(tr.Reader)
+		if err != nil {
+			return err
+		}
+		tr.Reader = readCloser
+		tr.closers = append(tr.closers, readCloser)
+	}
+	return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+	io.Writer
+
+	closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+	writer := &TransformWriter{
+		Writer:  baseWriter,
+		closers: make([]io.Closer, 0, len(transforms)),
+	}
+	for _, id := range transforms {
+		if err := writer.AddTransform(id); err != nil {
+			return nil, err
+		}
+	}
+	return writer, nil
+}
+
+// Close calls the underlying closers in appropriate order,
+// stopping at and returning the first error encountered.
+func (tw *TransformWriter) Close() error {
+	// Call closers in reversed order
+	for i := len(tw.closers) - 1; i >= 0; i-- {
+		if err := tw.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var zlibDefaultLevelWriterPool = newPool(
+	func() *zlib.Writer {
+		return zlib.NewWriter(nil)
+	},
+	nil,
+)
+
+type zlibPoolCloser struct {
+	writer *zlib.Writer
+}
+
+func (z *zlibPoolCloser) Close() error {
+	defer func() {
+		z.writer.Reset(nil)
+		zlibDefaultLevelWriterPool.put(&z.writer)
+	}()
+	return z.writer.Close()
+}
+
+// AddTransform adds a transform.
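+//
+// For TransformZlib the current writer is wrapped with a zlib writer taken
+// from a shared pool; closing the transform later flushes that zlib writer
+// and returns it to the pool via zlibPoolCloser above.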
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { + switch id { + default: + return NewTApplicationException( + INVALID_TRANSFORM, + fmt.Sprintf("THeaderTransformID %d not supported", id), + ) + case TransformNone: + // no-op + case TransformZlib: + writeCloser := zlibDefaultLevelWriterPool.get() + writeCloser.Reset(tw.Writer) + tw.Writer = writeCloser + tw.closers = append(tw.closers, &zlibPoolCloser{ + writer: writeCloser, + }) + } + return nil +} + +// THeaderInfoType is the type id of the info headers. +type THeaderInfoType int32 + +// Supported THeaderInfoType values. +const ( + _ THeaderInfoType = iota // Skip 0 + InfoKeyValue // 1 + // Rest of the info types are not supported. +) + +// THeaderTransport is a Transport mode that implements THeader. +// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), +// instead of rich transports like TZlibTransport or TFramedTransport. +type THeaderTransport struct { + SequenceID int32 + Flags uint32 + + transport TTransport + + // THeaderMap for read and write + readHeaders THeaderMap + writeHeaders THeaderMap + + // Reading related variables. + reader *bufio.Reader + // When frame is detected, we read the frame fully into frameBuffer. + frameBuffer *bytes.Buffer + // When it's non-nil, Read should read from frameReader instead of + // reader, and EOF error indicates end of frame instead of end of all + // transport. + frameReader io.ReadCloser + + // Writing related variables + writeBuffer *bytes.Buffer + writeTransforms []THeaderTransformID + + clientType clientType + protocolID THeaderProtocolID + cfg *TConfiguration + + // buffer is used in the following scenarios to avoid repetitive + // allocations, while 4 is big enough for all those scenarios: + // + // * header padding (max size 4) + // * write the frame size (size 4) + buffer [4]byte +} + +var _ TTransport = (*THeaderTransport)(nil) + +// Deprecated: Use NewTHeaderTransportConf instead. +func NewTHeaderTransport(trans TTransport) *THeaderTransport { + return NewTHeaderTransportConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportConf creates THeaderTransport from the +// underlying transport, with given TConfiguration attached. +// +// If trans is already a *THeaderTransport, it will be returned as is, +// but with TConfiguration overridden by the value passed in. +// +// The protocol ID in TConfiguration is only useful for client transports. +// For servers, +// the protocol ID will be overridden again to the one set by the client, +// to ensure that servers always speak the same dialect as the client. +func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { + if ht, ok := trans.(*THeaderTransport); ok { + ht.SetTConfiguration(conf) + return ht + } + PropagateTConfiguration(trans, conf) + return &THeaderTransport{ + transport: trans, + reader: bufio.NewReader(trans), + writeHeaders: make(THeaderMap), + writeTransforms: conf.GetTHeaderTransforms(), + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, + } +} + +// Open calls the underlying transport's Open function. +func (t *THeaderTransport) Open() error { + return t.transport.Open() +} + +// IsOpen calls the underlying transport's IsOpen function. +func (t *THeaderTransport) IsOpen() bool { + return t.transport.IsOpen() +} + +// ReadFrame tries to read the frame header, guess the client type, and handle +// unframed clients. 
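+//
+// The first four bytes are peeked to tell a frame length apart from the
+// magic bytes of an unframed binary or compact message; a framed payload
+// is then read fully into frameBuffer before being parsed.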
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error { + if !t.needReadFrame() { + // No need to read frame, skipping. + return nil + } + + // Peek and handle the first 32 bits. + // They could either be the length field of a framed message, + // or the first bytes of an unframed message. + var buf []byte + var err error + // This is also usually the first read from a connection, + // so handle retries around socket timeouts. + _, deadlineSet := ctx.Deadline() + for { + buf, err = t.reader.Peek(size32) + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout and we still have time, + // continue trying + continue + } + // For anything else, do not retry + break + } + if err != nil { + return err + } + + frameSize := binary.BigEndian.Uint32(buf) + if frameSize&VERSION_MASK == VERSION_1 { + t.clientType = clientUnframedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientUnframedCompact + return nil + } + + // At this point it should be a framed message, + // sanity check on frameSize then discard the peeked part. + if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("frame too large"), + ) + } + t.reader.Discard(size32) + + // Read the frame fully into frameBuffer. + if t.frameBuffer == nil { + t.frameBuffer = bufPool.get() + } + _, err = io.CopyN(t.frameBuffer, t.reader, int64(frameSize)) + if err != nil { + return err + } + t.frameReader = io.NopCloser(t.frameBuffer) + + // Peek and handle the next 32 bits. + buf = t.frameBuffer.Bytes()[:size32] + version := binary.BigEndian.Uint32(buf) + if version&THeaderHeaderMask == THeaderHeaderMagic { + t.clientType = clientHeaders + return t.parseHeaders(ctx, frameSize) + } + if version&VERSION_MASK == VERSION_1 { + t.clientType = clientFramedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientFramedCompact + return nil + } + if err := t.endOfFrame(); err != nil { + return err + } + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + errors.New("unsupported client transport type"), + ) +} + +// endOfFrame does end of frame handling. +// +// It closes frameReader, and also resets frame related states. +func (t *THeaderTransport) endOfFrame() error { + defer func() { + bufPool.put(&t.frameBuffer) + t.frameReader = nil + }() + return t.frameReader.Close() +} + +func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { + if t.clientType != clientHeaders { + return nil + } + + var err error + var meta headerMeta + if err = binary.Read(t.frameBuffer, binary.BigEndian, &meta); err != nil { + return err + } + frameSize -= headerMetaSize + t.Flags = meta.MagicFlags & THeaderFlagsMask + t.SequenceID = meta.SequenceID + headerLength := int64(meta.HeaderLength) * 4 + if int64(frameSize) < headerLength { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("header size is larger than the whole frame"), + ) + } + headerBuf := NewTMemoryBuffer() + _, err = io.CopyN(headerBuf, t.frameBuffer, headerLength) + if err != nil { + return err + } + hp := NewTCompactProtocol(headerBuf) + hp.SetTConfiguration(t.cfg) + + // At this point the header is already read into headerBuf, + // and t.frameBuffer starts from the actual payload. 
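+	// The header block is compact-protocol encoded: a varint protocol id,
+	// a varint transform count followed by that many varint transform ids,
+	// then optional info sections; any remainder is the zero padding the
+	// writer added to reach a 4-byte boundary.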
+ protoID, err := hp.readVarint32() + if err != nil { + return err + } + t.protocolID = THeaderProtocolID(protoID) + + // Reset writeTransforms to the ones from cfg, as we are going to add + // compression transforms from what we read, we don't want to accumulate + // different transforms read from different requests + t.writeTransforms = t.cfg.GetTHeaderTransforms() + + var transformCount int32 + transformCount, err = hp.readVarint32() + if err != nil { + return err + } + if transformCount > 0 { + reader := NewTransformReaderWithCapacity( + t.frameBuffer, + int(transformCount), + ) + t.frameReader = reader + transformIDs := make([]THeaderTransformID, transformCount) + for i := range int(transformCount) { + id, err := hp.readVarint32() + if err != nil { + return err + } + tID := THeaderTransformID(id) + transformIDs[i] = tID + + // For compression transforms, we should also add them + // to writeTransforms so that the response (assuming we + // are reading a request) would do the same compression. + switch tID { + case TransformZlib: + t.addWriteTransformsDedupe(tID) + } + } + // The transform IDs on the wire was added based on the order of + // writing, so on the reading side we need to reverse the order. + for i := transformCount - 1; i >= 0; i-- { + id := transformIDs[i] + if err := reader.AddTransform(id); err != nil { + return err + } + } + } + + // The info part does not use the transforms yet, so it's + // important to continue using headerBuf. + headers := make(THeaderMap) + for { + infoType, err := hp.readVarint32() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + if THeaderInfoType(infoType) == InfoKeyValue { + count, err := hp.readVarint32() + if err != nil { + return err + } + for range int(count) { + key, err := hp.ReadString(ctx) + if err != nil { + return err + } + value, err := hp.ReadString(ctx) + if err != nil { + return err + } + headers[key] = value + } + } else { + // Skip reading info section on the first + // unsupported info type. + break + } + } + t.readHeaders = headers + + return nil +} + +func (t *THeaderTransport) needReadFrame() bool { + if t.clientType == clientUnknown { + // This is a new connection that's never read before. + return true + } + if t.isFramed() && t.frameReader == nil { + // We just finished the last frame. + return true + } + return false +} + +func (t *THeaderTransport) Read(p []byte) (read int, err error) { + // Here using context.Background instead of a context passed in is safe. + // First is that there's no way to pass context into this function. + // Then, 99% of the case when calling this Read frame is already read + // into frameReader. ReadFrame here is more of preventing bugs that + // didn't call ReadFrame before calling Read. + err = t.ReadFrame(context.Background()) + if err != nil { + return + } + if t.frameReader != nil { + read, err = t.frameReader.Read(p) + if err == nil && t.frameBuffer.Len() <= 0 { + // the last Read finished the frame, do endOfFrame + // handling here. + err = t.endOfFrame() + } else if errors.Is(err, io.EOF) { + err = t.endOfFrame() + if err != nil { + return + } + if read == 0 { + // Try to read the next frame when we hit EOF + // (end of frame) immediately. + // When we got here, it means the last read + // finished the previous frame, but didn't + // do endOfFrame handling yet. + // We have to read the next frame here, + // as otherwise we would return 0 and nil, + // which is a case not handled well by most + // protocol implementations. 
+ return t.Read(p) + } + } + return + } + return t.reader.Read(p) +} + +// Write writes data to the write buffer. +// +// You need to call Flush to actually write them to the transport. +func (t *THeaderTransport) Write(p []byte) (int, error) { + if t.writeBuffer == nil { + t.writeBuffer = bufPool.get() + } + return t.writeBuffer.Write(p) +} + +// Flush writes the appropriate header and the write buffer to the underlying transport. +func (t *THeaderTransport) Flush(ctx context.Context) error { + if t.writeBuffer == nil || t.writeBuffer.Len() == 0 { + return nil + } + + defer bufPool.put(&t.writeBuffer) + + switch t.clientType { + default: + fallthrough + case clientUnknown: + t.clientType = clientHeaders + fallthrough + case clientHeaders: + headers := NewTMemoryBuffer() + hp := NewTCompactProtocol(headers) + hp.SetTConfiguration(t.cfg) + if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { + return NewTTransportExceptionFromError(err) + } + for _, transform := range t.writeTransforms { + if _, err := hp.writeVarint32(int32(transform)); err != nil { + return NewTTransportExceptionFromError(err) + } + } + if len(t.writeHeaders) > 0 { + if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { + return NewTTransportExceptionFromError(err) + } + for key, value := range t.writeHeaders { + if err := hp.WriteString(ctx, key); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := hp.WriteString(ctx, value); err != nil { + return NewTTransportExceptionFromError(err) + } + } + } + padding := 4 - headers.Len()%4 + if padding < 4 { + buf := t.buffer[:padding] + for i := range buf { + buf[i] = 0 + } + if _, err := headers.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + } + + payload := bufPool.get() + defer bufPool.put(&payload) + meta := headerMeta{ + MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, + SequenceID: t.SequenceID, + HeaderLength: uint16(headers.Len() / 4), + } + if err := binary.Write(payload, binary.BigEndian, meta); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(payload, headers); err != nil { + return NewTTransportExceptionFromError(err) + } + + writer, err := NewTransformWriter(payload, t.writeTransforms) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(writer, t.writeBuffer); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := writer.Close(); err != nil { + return NewTTransportExceptionFromError(err) + } + + // First write frame length + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(payload.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + // Then write the payload + if _, err := io.Copy(t.transport, payload); err != nil { + return NewTTransportExceptionFromError(err) + } + + case clientFramedBinary, clientFramedCompact: + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + fallthrough + case clientUnframedBinary, clientUnframedCompact: + if _, err := io.Copy(t.transport, t.writeBuffer); err != nil { + return 
NewTTransportExceptionFromError(err) + } + } + + select { + default: + case <-ctx.Done(): + return NewTTransportExceptionFromError(ctx.Err()) + } + + return t.transport.Flush(ctx) +} + +// Close closes the transport, along with its underlying transport. +func (t *THeaderTransport) Close() error { + if err := t.Flush(context.Background()); err != nil { + return err + } + return t.transport.Close() +} + +// RemainingBytes calls underlying transport's RemainingBytes. +// +// Even in framed cases, because of all the possible compression transforms +// involved, the remaining frame size is likely to be different from the actual +// remaining readable bytes, so we don't bother to keep tracking the remaining +// frame size by ourselves and just use the underlying transport's +// RemainingBytes directly. +func (t *THeaderTransport) RemainingBytes() uint64 { + return t.transport.RemainingBytes() +} + +// GetReadHeaders returns the THeaderMap read from transport. +func (t *THeaderTransport) GetReadHeaders() THeaderMap { + return t.readHeaders +} + +// SetWriteHeader sets a header for write. +func (t *THeaderTransport) SetWriteHeader(key, value string) { + t.writeHeaders[key] = value +} + +// ClearWriteHeaders clears all write headers previously set. +func (t *THeaderTransport) ClearWriteHeaders() { + t.writeHeaders = make(THeaderMap) +} + +// AddTransform add a transform for writing. +// +// NOTE: This is provided as a low-level API, but in general you should use +// TConfiguration.THeaderTransforms to set transforms for writing instead. +func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { + if !supportedTransformIDs[transform] { + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + fmt.Errorf("THeaderTransformID %d not supported", transform), + ) + } + t.writeTransforms = append(t.writeTransforms, transform) + return nil +} + +// Protocol returns the wrapped protocol id used in this THeaderTransport. +func (t *THeaderTransport) Protocol() THeaderProtocolID { + switch t.clientType { + default: + return t.protocolID + case clientFramedBinary, clientUnframedBinary: + return THeaderProtocolBinary + case clientFramedCompact, clientUnframedCompact: + return THeaderProtocolCompact + } +} + +func (t *THeaderTransport) isFramed() bool { + switch t.clientType { + default: + return false + case clientHeaders, clientFramedBinary, clientFramedCompact: + return true + } +} + +// addWriteTransformsDedupe adds id to writeTransforms only if it's not already +// there. +func (t *THeaderTransport) addWriteTransformsDedupe(id THeaderTransformID) { + for _, existingID := range t.writeTransforms { + if existingID == id { + return + } + } + t.writeTransforms = append(t.writeTransforms, id) +} + +// SetTConfiguration implements TConfigurationSetter. +func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(t.transport, cfg) + t.cfg = cfg +} + +// THeaderTransportFactory is a TTransportFactory implementation to create +// THeaderTransport. +// +// It also implements TConfigurationSetter. +type THeaderTransportFactory struct { + // The underlying factory, could be nil. + Factory TTransportFactory + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderTransportFactoryConf instead. 
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with +// the given *TConfiguration. +func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + return &THeaderTransportFactory{ + Factory: factory, + + cfg: conf, + } +} + +// GetTransport implements TTransportFactory. +func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if f.Factory != nil { + t, err := f.Factory.GetTransport(trans) + if err != nil { + return nil, err + } + return NewTHeaderTransportConf(t, f.cfg), nil + } + return NewTHeaderTransportConf(trans, f.cfg), nil +} + +// SetTConfiguration implements TConfigurationSetter. +func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.Factory, f.cfg) + f.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*THeaderTransportFactory)(nil) + _ TConfigurationSetter = (*THeaderTransport)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go new file mode 100644 index 00000000..a0f20665 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "strconv" +) + +// Default to using the shared http client. Library users are +// free to change this global client or specify one through +// THttpClientOptions. 
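+//
+// For example (a sketch, with an arbitrary 30-second timeout), a custom
+// client can be supplied per transport instead of replacing the global one:
+//
+//	NewTHttpClientWithOptions(url, THttpClientOptions{
+//		Client: &http.Client{Timeout: 30 * time.Second},
+//	})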
+var DefaultHttpClient *http.Client = http.DefaultClient
+
+type THttpClient struct {
+	client        *http.Client
+	response      *http.Response
+	url           *url.URL
+	requestBuffer *bytes.Buffer
+	header        http.Header
+}
+
+type THttpClientTransportFactory struct {
+	options THttpClientOptions
+	url     string
+}
+
+func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*THttpClient)
+		if ok && t.url != nil {
+			return NewTHttpClientWithOptions(t.url.String(), p.options)
+		}
+	}
+	return NewTHttpClientWithOptions(p.url, p.options)
+}
+
+type THttpClientOptions struct {
+	// If nil, DefaultHttpClient is used
+	Client *http.Client
+}
+
+func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
+}
+
+func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
+	return &THttpClientTransportFactory{url: url, options: options}
+}
+
+func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
+	parsedURL, err := url.Parse(urlstr)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]byte, 0, 1024)
+	client := options.Client
+	if client == nil {
+		client = DefaultHttpClient
+	}
+	httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
+	return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
+}
+
+func NewTHttpClient(urlstr string) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
+}
+
+// Set the HTTP Header for this specific Thrift Transport.
+// It is important that you first assert the TTransport as a *THttpClient type
+// like so:
+//
+//	httpTrans := trans.(*THttpClient)
+//	httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
+func (p *THttpClient) SetHeader(key string, value string) {
+	p.header.Add(key, value)
+}
+
+// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport.
+// It is important that you first assert the TTransport as a *THttpClient type
+// like so:
+//
+//	httpTrans := trans.(*THttpClient)
+//	hdrValue := httpTrans.GetHeader("User-Agent")
+func (p *THttpClient) GetHeader(key string) string {
+	return p.header.Get(key)
+}
+
+// Deletes the HTTP Header given a Header Key for this specific Thrift Transport.
+// It is important that you first assert the TTransport as a *THttpClient type
+// like so:
+//
+//	httpTrans := trans.(*THttpClient)
+//	httpTrans.DelHeader("User-Agent")
+func (p *THttpClient) DelHeader(key string) {
+	p.header.Del(key)
+}
+
+func (p *THttpClient) Open() error {
+	// do nothing
+	return nil
+}
+
+func (p *THttpClient) IsOpen() bool {
+	return p.response != nil || p.requestBuffer != nil
+}
+
+func (p *THttpClient) closeResponse() error {
+	var err error
+	if p.response != nil && p.response.Body != nil {
+		// The docs specify that if keepalive is enabled and the response body is not
+		// read to completion the connection will never be returned to the pool and
+		// reused. Errors are being ignored here because if the connection is invalid
+		// and this fails for some reason, the Close() method will do any remaining
+		// cleanup.
+ io.Copy(io.Discard, p.response.Body) + + err = p.response.Body.Close() + } + + p.response = nil + return err +} + +func (p *THttpClient) Close() error { + if p.requestBuffer != nil { + p.requestBuffer.Reset() + p.requestBuffer = nil + } + return p.closeResponse() +} + +func (p *THttpClient) Read(buf []byte) (int, error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + n, err := p.response.Body.Read(buf) + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { + return n, nil + } + return n, NewTTransportExceptionFromError(err) +} + +func (p *THttpClient) ReadByte() (c byte, err error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + return readByte(p.response.Body) +} + +func (p *THttpClient) Write(buf []byte) (int, error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.Write(buf) +} + +func (p *THttpClient) WriteByte(c byte) error { + if p.requestBuffer == nil { + return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteByte(c) +} + +func (p *THttpClient) WriteString(s string) (n int, err error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteString(s) +} + +func (p *THttpClient) Flush(ctx context.Context) error { + // Close any previous response body to avoid leaking connections. + p.closeResponse() + + // Give up the ownership of the current request buffer to http request, + // and create a new buffer for the next request. + buf := p.requestBuffer + p.requestBuffer = new(bytes.Buffer) + req, err := http.NewRequest("POST", p.url.String(), buf) + if err != nil { + return NewTTransportExceptionFromError(err) + } + req.Header = p.header + if ctx != nil { + req = req.WithContext(ctx) + } + response, err := p.client.Do(req) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if response.StatusCode != http.StatusOK { + // Close the response to avoid leaking file descriptors. closeResponse does + // more than just call Close(), so temporarily assign it and reuse the logic. + p.response = response + p.closeResponse() + + // TODO(pomack) log bad response + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) + } + p.response = response + return nil +} + +func (p *THttpClient) RemainingBytes() (num_bytes uint64) { + len := p.response.ContentLength + if len >= 0 { + return uint64(len) + } + + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// Deprecated: Use NewTHttpClientTransportFactory instead. +func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. +func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, options) +} + +// Deprecated: Use NewTHttpClientWithOptions instead. 
+func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, options) +} + +// Deprecated: Use NewTHttpClient instead. +func NewTHttpPostClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go new file mode 100644 index 00000000..c84aba95 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "compress/gzip" + "io" + "net/http" + "strings" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} + +// gz transparently compresses the HTTP response if the client supports it. +func gz(handler http.HandlerFunc) http.HandlerFunc { + sp := newPool(func() *gzip.Writer { + return gzip.NewWriter(nil) + }, nil) + + return func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + handler(w, r) + return + } + w.Header().Set("Content-Encoding", "gzip") + gz := sp.get() + gz.Reset(w) + defer func() { + gz.Close() + sp.put(&gz) + }() + gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} + handler(gzw, r) + } +} + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go new file mode 100644 index 00000000..1c477990 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "context" + "io" +) + +// StreamTransport is a Transport made of an io.Reader and/or an io.Writer +type StreamTransport struct { + io.Reader + io.Writer + isReadWriter bool + closed bool +} + +type StreamTransportFactory struct { + Reader io.Reader + Writer io.Writer + isReadWriter bool +} + +func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*StreamTransport) + if ok { + if t.isReadWriter { + return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil + } + if t.Reader != nil && t.Writer != nil { + return NewStreamTransport(t.Reader, t.Writer), nil + } + if t.Reader != nil && t.Writer == nil { + return NewStreamTransportR(t.Reader), nil + } + if t.Reader == nil && t.Writer != nil { + return NewStreamTransportW(t.Writer), nil + } + return &StreamTransport{}, nil + } + } + if p.isReadWriter { + return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil + } + if p.Reader != nil && p.Writer != nil { + return NewStreamTransport(p.Reader, p.Writer), nil + } + if p.Reader != nil && p.Writer == nil { + return NewStreamTransportR(p.Reader), nil + } + if p.Reader == nil && p.Writer != nil { + return NewStreamTransportW(p.Writer), nil + } + return &StreamTransport{}, nil +} + +func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { + return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} +} + +func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportR(r io.Reader) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r)} +} + +func NewStreamTransportW(w io.Writer) *StreamTransport { + return &StreamTransport{Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { + bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) + return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} +} + +func (p *StreamTransport) IsOpen() bool { + return !p.closed +} + +// implicitly opened on creation, can't be reopened once closed +func (p *StreamTransport) Open() error { + if !p.closed { + return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") + } else { + return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") + } +} + +// Closes both the input and output streams. +func (p *StreamTransport) Close() error { + if p.closed { + return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") + } + p.closed = true + closedReader := false + if p.Reader != nil { + c, ok := p.Reader.(io.Closer) + if ok { + e := c.Close() + closedReader = true + if e != nil { + return e + } + } + p.Reader = nil + } + if p.Writer != nil && (!closedReader || !p.isReadWriter) { + c, ok := p.Writer.(io.Closer) + if ok { + e := c.Close() + if e != nil { + return e + } + } + p.Writer = nil + } + return nil +} + +// Flushes the underlying output stream if not null. 
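+// Flushing is a no-op when the writer does not implement the Flusher
+// interface; the bufio.Writer set up by the constructors above does.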
+func (p *StreamTransport) Flush(ctx context.Context) error { + if p.Writer == nil { + return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") + } + f, ok := p.Writer.(Flusher) + if ok { + err := f.Flush() + if err != nil { + return NewTTransportExceptionFromError(err) + } + } + return nil +} + +func (p *StreamTransport) Read(c []byte) (n int, err error) { + n, err = p.Reader.Read(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) ReadByte() (c byte, err error) { + f, ok := p.Reader.(io.ByteReader) + if ok { + c, err = f.ReadByte() + } else { + c, err = readByte(p.Reader) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) Write(c []byte) (n int, err error) { + n, err = p.Writer.Write(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteByte(c byte) (err error) { + f, ok := p.Writer.(io.ByteWriter) + if ok { + err = f.WriteByte(c) + } else { + err = writeByte(p.Writer, c) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteString(s string) (n int, err error) { + f, ok := p.Writer.(stringWriter) + if ok { + n, err = f.WriteString(s) + } else { + n, err = p.Writer.Write([]byte(s)) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.Reader, conf) + PropagateTConfiguration(p.Writer, conf) +} + +var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go new file mode 100644 index 00000000..6743a7fe --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go @@ -0,0 +1,567 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "encoding/base64" + "fmt" +) + +const ( + THRIFT_JSON_PROTOCOL_VERSION = 1 +) + +// for references to _ParseContext see tsimplejson_protocol.go + +// JSON protocol implementation for thrift. 
+// Utilizes Simple JSON protocol +type TJSONProtocol struct { + *TSimpleJSONProtocol +} + +// Constructor +func NewTJSONProtocol(t TTransport) *TJSONProtocol { + v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) + return v +} + +// Factory +type TJSONProtocolFactory struct{} + +func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTJSONProtocol(trans) +} + +func NewTJSONProtocolFactory() *TJSONProtocolFactory { + return &TJSONProtocolFactory{} +} + +func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { + return e + } + if e := p.WriteString(ctx, name); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(ctx, seqId); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteI16(ctx, id); e != nil { + return e + } + if e := p.OutputObjectBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(typeId) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } + +func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(keyType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + s, e1 = p.TypeIdToString(valueType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + if e := p.WriteI64(ctx, int64(size)); e != nil { + return e + } + return p.OutputObjectBegin() +} + +func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { + if e := p.OutputObjectEnd(); e != nil { + return e + } + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { + if b { + return p.WriteI32(ctx, 1) + } + return p.WriteI32(ctx, 0) +} + +func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) +} + +func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, 
int32(v)) +} + +func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { + return p.OutputF64(v) +} + +func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { + return p.OutputString(v) +} + +func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. +func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + version, err := p.ReadI32(ctx) + if err != nil { + return name, typeId, seqId, err + } + if version != THRIFT_JSON_PROTOCOL_VERSION { + e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) + return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) + + } + if name, err = p.ReadString(ctx); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte(ctx) + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(ctx); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { + err := p.ParseListEnd() + return err +} + +func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { + b, _ := p.reader.Peek(1) + if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { + return "", STOP, -1, nil + } + fieldId, err := p.ReadI16(ctx) + if err != nil { + return "", STOP, fieldId, err + } + if _, err = p.ParseObjectStart(); err != nil { + return "", STOP, fieldId, err + } + sType, err := p.ReadString(ctx) + if err != nil { + return "", STOP, fieldId, err + } + fType, err := p.StringToTypeId(sType) + return "", fType, fieldId, err +} + +func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + sKeyType, e := p.ReadString(ctx) + if e != nil { + return keyType, 
valueType, size, e + } + keyType, e = p.StringToTypeId(sKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + sValueType, e := p.ReadString(ctx) + if e != nil { + return keyType, valueType, size, e + } + valueType, e = p.StringToTypeId(sValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64(ctx) + if err != nil { + return keyType, valueType, size, err + } + err = checkSizeForProtocol(int32(iSize), p.cfg) + if err != nil { + return keyType, valueType, 0, err + } + size = int(iSize) + + _, e = p.ParseObjectStart() + return keyType, valueType, size, e +} + +func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { + e := p.ParseObjectEnd() + if e != nil { + return e + } + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + value, err := p.ReadI32(ctx) + return (value != 0), err +} + +func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) + return int8(v), err +} + +func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) + return int16(v), err +} + +func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) + return int32(v), err +} + +func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", 
string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { + err = p.writer.Flush() + if err == nil { + err = p.trans.Flush(ctx) + } + return NewTProtocolException(err) +} + +func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. + sElemType, err := p.ReadString(context.Background()) + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err := p.ParseI64() + if err != nil { + return elemType, 0, err + } + err = checkSizeForProtocol(int32(nSize), p.cfg) + if err != nil { + return elemType, 0, err + } + size = int(nSize) + return elemType, size, nil +} + +func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { + switch byte(fieldType) { + case BOOL: + return "tf", nil + case BYTE: + return "i8", nil + case I16: + return "i16", nil + case I32: + return "i32", nil + case I64: + return "i64", nil + case DOUBLE: + return "dbl", nil + case STRING: + return "str", nil + case STRUCT: + return "rec", nil + case MAP: + return "map", nil + case SET: + return "set", nil + case LIST: + return "lst", nil + case UUID: + return "uid", nil + } + + e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { + switch fieldType { + case "tf": + return TType(BOOL), nil + case "i8": + return TType(BYTE), nil + case "i16": + return TType(I16), nil + case "i32": + return TType(I32), nil + case "i64": + return TType(I64), nil + case "dbl": + return TType(DOUBLE), nil + case "str": + return TType(STRING), nil + case "rec": + return TType(STRUCT), nil + case "map": + return TType(MAP), nil + case "set": + return TType(SET), nil + case "lst": + return TType(LIST), nil + case "uid": + return TType(UUID), nil + } + + e := fmt.Errorf("Unknown type identifier: %s", fieldType) + return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/logger.go b/vendor/github.com/apache/thrift/lib/go/thrift/logger.go new file mode 100644 index 00000000..4a0affe6 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/logger.go @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * 
or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "os" + "testing" +) + +// Logger is a simple wrapper of a logging function. +// +// In reality, users might use different logging libraries, and those libraries +// are not always compatible with each other. +// +// Logger is meant to be a simple common ground that makes it easy to wrap +// whatever logging library is in use. +// +// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design +// discussion behind it. +// +// Deprecated: This is no longer used by any thrift go library code, +// and will be removed in a future version. +type Logger func(msg string) + +// NopLogger is a Logger implementation that does nothing. +// +// Deprecated: This is no longer used by any thrift go library code, +// and will be removed in a future version. +func NopLogger(msg string) {} + +// StdLogger wraps the stdlib log package into a Logger. +// +// If the logger passed in is nil, it falls back to stderr with default flags. +// +// Deprecated: This is no longer used by any thrift go library code, +// and will be removed in a future version. +func StdLogger(logger *log.Logger) Logger { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + return func(msg string) { + logger.Print(msg) + } +} + +// TestLogger is a Logger implementation that can be used in test code. +// +// It fails the test when called. +// +// Deprecated: This is no longer used by any thrift go library code, +// and will be removed in a future version. +func TestLogger(tb testing.TB) Logger { + return func(msg string) { + tb.Errorf("logger called with msg: %q", msg) + } +} + +func fallbackLogger(logger Logger) Logger { + if logger == nil { + return StdLogger(nil) + } + return logger +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go new file mode 100644 index 00000000..5936d273 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" +) + +// Memory buffer-based implementation of the TTransport interface. +type TMemoryBuffer struct { + *bytes.Buffer + size int +} + +type TMemoryBufferTransportFactory struct { + size int +} + +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*TMemoryBuffer) + if ok && t.size > 0 { + return NewTMemoryBufferLen(t.size), nil + } + } + return NewTMemoryBufferLen(p.size), nil +} + +func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { + return &TMemoryBufferTransportFactory{size: size} +} + +func NewTMemoryBuffer() *TMemoryBuffer { + return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} +} + +func NewTMemoryBufferLen(size int) *TMemoryBuffer { + buf := make([]byte, 0, size) + return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} +} + +func (p *TMemoryBuffer) IsOpen() bool { + return true +} + +func (p *TMemoryBuffer) Open() error { + return nil +} + +func (p *TMemoryBuffer) Close() error { + p.Buffer.Reset() + return nil +} + +// Flushing a memory buffer is a no-op +func (p *TMemoryBuffer) Flush(ctx context.Context) error { + return nil +} + +func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { + return uint64(p.Buffer.Len()) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go new file mode 100644 index 00000000..25ab2e98 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Message type constants in the Thrift protocol. +type TMessageType int32 + +const ( + INVALID_TMESSAGE_TYPE TMessageType = 0 + CALL TMessageType = 1 + REPLY TMessageType = 2 + EXCEPTION TMessageType = 3 + ONEWAY TMessageType = 4 +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go b/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go new file mode 100644 index 00000000..85c7e069 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the +// TProcessorFunctions for that TProcessor. +// +// Middlewares are passed in the name of the function as set in the processor +// map of the TProcessor. +type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction + +// WrapProcessor takes an existing TProcessor and wraps each of its inner +// TProcessorFunctions with the middlewares passed in and returns it. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. Middlewares[n] +func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { + for name, processorFunc := range processor.ProcessorMap() { + wrapped := processorFunc + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i](name, wrapped) + } + processor.AddToProcessorMap(name, wrapped) + } + return processor +} + +// WrappedTProcessorFunction is a convenience struct that implements the +// TProcessorFunction interface that can be used when implementing custom +// Middleware. +type WrappedTProcessorFunction struct { + // Wrapped is called by WrappedTProcessorFunction.Process and should be a + // "wrapped" call to a base TProcessorFunc.Process call. + Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// Process implements the TProcessorFunction interface using p.Wrapped. +func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { + return p.Wrapped(ctx, seqID, in, out) +} + +// verify that WrappedTProcessorFunction implements TProcessorFunction +var ( + _ TProcessorFunction = WrappedTProcessorFunction{} + _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) +) + +// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls +// with custom middleware. +type ClientMiddleware func(TClient) TClient + +// WrappedTClient is a convenience struct that implements the TClient interface +// using inner Wrapped function. +// +// This is provided to aid in developing ClientMiddleware. +type WrappedTClient struct { + Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +// Call implements the TClient interface by calling and returning c.Wrapped. +func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + return c.Wrapped(ctx, method, args, result) +} + +// verify that WrappedTClient implements TClient +var ( + _ TClient = WrappedTClient{} + _ TClient = (*WrappedTClient)(nil) +) + +// WrapClient wraps the given TClient in the given middlewares. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. 
Middlewares[n] +func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + client = middlewares[i](client) + } + return client +} + +// ExtractIDLExceptionClientMiddleware is a ClientMiddleware implementation that +// extracts exceptions defined in thrift IDL into the error return of +// TClient.Call. It uses ExtractExceptionFromResult under the hood. +// +// By default if a client call gets an exception defined in the thrift IDL, for +// example: +// +// service MyService { +// FooResponse foo(1: FooRequest request) throws ( +// 1: Exception1 error1, +// 2: Exception2 error2, +// ) +// } +// +// Exception1 or Exception2 will not be in the err return of TClient.Call, +// but in the result TStruct instead, and there's no easy access to them. +// If you have a ClientMiddleware that would need to access them, +// you can add this middleware into your client middleware chain, +// *after* your other middlewares need them, +// then your other middlewares will have access to those exceptions from the err +// return. +// +// Alternatively you can also just use ExtractExceptionFromResult in your client +// middleware directly to access those exceptions. +func ExtractIDLExceptionClientMiddleware(next TClient) TClient { + return WrappedTClient{ + Wrapped: func(ctx context.Context, method string, args, result TStruct) (_ ResponseMeta, err error) { + defer func() { + if err == nil { + err = ExtractExceptionFromResult(result) + } + }() + return next.Call(ctx, method, args, result) + }, + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go new file mode 100644 index 00000000..cacbf6be --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" + "strings" +) + +/* +TMultiplexedProtocol is a protocol-independent concrete decorator +that allows a Thrift client to communicate with a multiplexing Thrift server, +by prepending the service name to the function name during function calls. + +NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request +from a multiplexing client. 
+ +This example uses a single socket transport to invoke two services: + +socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) +transport := thrift.NewTFramedTransport(socket) +protocol := thrift.NewTBinaryProtocolTransport(transport) + +mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") +service := Calculator.NewCalculatorClient(mp) + +mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") +service2 := WeatherReport.NewWeatherReportClient(mp2) + +err := transport.Open() +if err != nil { + t.Fatal("Unable to open client socket", err) +} + +fmt.Println(service.Add(2, 2)) +fmt.Println(service2.GetTemperature()) +*/ + +type TMultiplexedProtocol struct { + TProtocol + serviceName string +} + +const MULTIPLEXED_SEPARATOR = ":" + +func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { + return &TMultiplexedProtocol{ + TProtocol: protocol, + serviceName: serviceName, + } +} + +func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + if typeId == CALL || typeId == ONEWAY { + return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + } + return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) +} + +/* +TMultiplexedProcessor is a TProcessor allowing +a single TServer to provide multiple services. + +To do so, you instantiate the processor and then register additional +processors with it, as shown in the following example: + +var processor = thrift.NewTMultiplexedProcessor() + +processor.RegisterProcessor( + "Calculator", + Calculator.NewCalculatorProcessor(&CalculatorHandler{}), +) + +processor.RegisterProcessor( + "WeatherReport", + WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), +) + +serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) +if err != nil { + t.Fatal("Unable to create server socket", err) +} +server := thrift.NewTSimpleServer2(processor, serverTransport) +server.Serve() +*/ + +type TMultiplexedProcessor struct { + serviceProcessorMap map[string]TProcessor + DefaultProcessor TProcessor +} + +func NewTMultiplexedProcessor() *TMultiplexedProcessor { + return &TMultiplexedProcessor{ + serviceProcessorMap: make(map[string]TProcessor), + } +}
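Before the ProcessorMap implementation, a short concrete illustration of the key scheme it produces may help. This is a sketch only: Calculator and WeatherReport are the hypothetical generated packages from the doc comment above, not part of this file, and fmt is assumed to be imported.

```go
processor := thrift.NewTMultiplexedProcessor()
processor.RegisterProcessor("Calculator", Calculator.NewCalculatorProcessor(&CalculatorHandler{}))
processor.RegisterProcessor("WeatherReport", WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}))

// Registered services contribute "{ServiceName}:{FunctionName}" keys,
// e.g. "Calculator:add"; a DefaultProcessor would contribute bare
// "{FunctionName}" keys alongside them.
for name := range processor.ProcessorMap() {
	fmt.Println(name)
}
```

+// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" +// to TProcessorFunction for any registered processors. If there is also a +// DefaultProcessor, the keys for the methods on that processor will simply be +// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and +// other registered processors, then the keys will be a mix of both formats. +// +// The implementation differs from other TProcessors in that the map returned is +// a new map, while most TProcessors just return their internal mapping directly. +// This means that edits to the map returned by this implementation of ProcessorMap +// will not affect the underlying mapping within the TMultiplexedProcessor.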
+func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { + processorFuncMap := make(map[string]TProcessorFunction) + for name, processor := range t.serviceProcessorMap { + for method, processorFunc := range processor.ProcessorMap() { + processorFuncName := name + MULTIPLEXED_SEPARATOR + method + processorFuncMap[processorFuncName] = processorFunc + } + } + if t.DefaultProcessor != nil { + for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { + processorFuncMap[method] = processorFunc + } + } + return processorFuncMap +} + +// AddToProcessorMap updates the underlying TProcessor ProcessorMaps depending on +// the format of "name". +// +// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", +// then it sets the given TProcessorFunction on the inner TProcessor with the +// ProcessorName component using the FunctionName component. +// +// If "name" is just in the format "{FunctionName}", that is to say there is no +// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor +// configured, then it will set the given TProcessorFunction on the DefaultProcessor +// using the given name. +// +// If there is not a TProcessor available for the given name, then this function +// does nothing. This can happen when there is no TProcessor registered for +// the given ProcessorName or if all that is given is the FunctionName and there +// is no DefaultProcessor set. +func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { + components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(components) != 2 { + if t.DefaultProcessor != nil && len(components) == 1 { + t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc) + } + return + } + processorName := components[0] + funcName := components[1] + if processor, ok := t.serviceProcessorMap[processorName]; ok { + processor.AddToProcessorMap(funcName, processorFunc) + } +} + +// verify that TMultiplexedProcessor implements TProcessor +var _ TProcessor = (*TMultiplexedProcessor)(nil) + +func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { + t.DefaultProcessor = processor +} + +func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { + if t.serviceProcessorMap == nil { + t.serviceProcessorMap = make(map[string]TProcessor) + } + t.serviceProcessorMap[name] = processor +} + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin(ctx) + if err != nil { + return false, NewTProtocolException(err) + } + if typeId != CALL && typeId != ONEWAY { + return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) + } + // extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, NewTProtocolException(fmt.Errorf( + "Service name not found in message name: %s. Did you forget to use a TMultiplexedProtocol in your client?", + name, + )) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, NewTProtocolException(fmt.Errorf( + "Service name not found: %s.
Did you forget to call RegisterProcessor()?", + v[0], + )) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} + +// Protocol that uses the stored message for ReadMessageBegin. +type storedMessageProtocol struct { + TProtocol + name string + typeId TMessageType + seqid int32 +} + +func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { + return &storedMessageProtocol{protocol, name, typeId, seqid} +} + +func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + return s.name, s.typeId, s.seqid, nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go new file mode 100644 index 00000000..e4512d20 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(iValue, 10) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(int64(iValue), 10) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} + +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go new file mode 100644 index 00000000..1eddfa70 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go @@ -0,0 +1,59 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Pointer is the generic (type parameter) version of the helper function that +// converts types to pointer types. +func Pointer[T any](v T) *T { + return &v +} + +/////////////////////////////////////////////////////////////////////////////// +// This file is home to helpers that convert from various base types to +// respective pointer types. This is necessary because Go does not permit +// references to constants, nor can a pointer type to base type be allocated +// and initialized in a single expression. +// +// E.g., this is not allowed: +// +// var ip *int = &5 +// +// But this *is* allowed: +// +// func IntPtr(i int) *int { return &i } +// var ip *int = IntPtr(5) +// +// Since pointers to base types are commonplace as [optional] fields in +// exported thrift structs, we factor such helpers here. +/////////////////////////////////////////////////////////////////////////////// + +func Float32Ptr(v float32) *float32 { return &v } +func Float64Ptr(v float64) *float64 { return &v } +func IntPtr(v int) *int { return &v } +func Int8Ptr(v int8) *int8 { return &v } +func Int16Ptr(v int16) *int16 { return &v } +func Int32Ptr(v int32) *int32 { return &v } +func Int64Ptr(v int64) *int64 { return &v } +func StringPtr(v string) *string { return &v } +func Uint32Ptr(v uint32) *uint32 { return &v } +func Uint64Ptr(v uint64) *uint64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func ByteSlicePtr(v []byte) *[]byte { return &v } +func TuuidPtr(v Tuuid) *Tuuid { return &v } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pool.go b/vendor/github.com/apache/thrift/lib/go/thrift/pool.go new file mode 100644 index 00000000..1d623d42 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/pool.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "sync" +) + +// pool is a generic sync.Pool wrapper with bells and whistles. 
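Because pool and newPool are unexported, usage necessarily lives inside the thrift package itself; the bufPool variable at the bottom of this file is the real in-tree example. Here is a minimal sketch of the same pattern (exampleBufPool and borrowBuffer are illustrative names, not part of this file):

```go
package thrift

import "bytes"

// No generate func is given, so new(bytes.Buffer) is used; the reset func
// runs on every get, so borrowed buffers always arrive empty.
var exampleBufPool = newPool(nil, func(b *bytes.Buffer) { b.Reset() })

func borrowBuffer() string {
	buf := exampleBufPool.get()
	defer exampleBufPool.put(&buf) // put returns the buffer and nils the local pointer
	buf.WriteString("thrift")
	return buf.String()
}
```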
+type pool[T any] struct { + pool sync.Pool + reset func(*T) +} + +// newPool creates a new pool. +// +// Both generate and reset are optional. +// Default generate is just new(T), +// When reset is nil we don't do any additional resetting when calling get. +func newPool[T any](generate func() *T, reset func(*T)) *pool[T] { + if generate == nil { + generate = func() *T { + return new(T) + } + } + return &pool[T]{ + pool: sync.Pool{ + New: func() interface{} { + return generate() + }, + }, + reset: reset, + } +} + +func (p *pool[T]) get() *T { + r := p.pool.Get().(*T) + if p.reset != nil { + p.reset(r) + } + return r +} + +func (p *pool[T]) put(r **T) { + p.pool.Put(*r) + *r = nil +} + +var bufPool = newPool(nil, func(buf *bytes.Buffer) { + buf.Reset() +}) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go new file mode 100644 index 00000000..245a3ccf --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) + + // ProcessorMap returns a map of thrift method names to TProcessorFunctions. + ProcessorMap() map[string]TProcessorFunction + + // AddToProcessorMap adds the given TProcessorFunction to the internal + // processor map at the given key. + // + // If one is already set at the given key, it will be replaced with the new + // TProcessorFunction. + AddToProcessorMap(string, TProcessorFunction) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// The default processor factory just returns a singleton +// instance. +type TProcessorFactory interface { + GetProcessor(trans TTransport) TProcessor +} + +type tProcessorFactory struct { + processor TProcessor +} + +func NewTProcessorFactory(p TProcessor) TProcessorFactory { + return &tProcessorFactory{processor: p} +} + +func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { + return p.processor +} + +/** + * The default processor factory just returns a singleton + * instance. 
+ */ +type TProcessorFunctionFactory interface { + GetProcessorFunction(trans TTransport) TProcessorFunction +} + +type tProcessorFunctionFactory struct { + processor TProcessorFunction +} + +func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { + return &tProcessorFunctionFactory{processor: p} +} + +func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { + return p.processor +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go new file mode 100644 index 00000000..68cfe4aa --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error + WriteMessageEnd(ctx context.Context) error + WriteStructBegin(ctx context.Context, name string) error + WriteStructEnd(ctx context.Context) error + WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error + WriteFieldEnd(ctx context.Context) error + WriteFieldStop(ctx context.Context) error + WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error + WriteMapEnd(ctx context.Context) error + WriteListBegin(ctx context.Context, elemType TType, size int) error + WriteListEnd(ctx context.Context) error + WriteSetBegin(ctx context.Context, elemType TType, size int) error + WriteSetEnd(ctx context.Context) error + WriteBool(ctx context.Context, value bool) error + WriteByte(ctx context.Context, value int8) error + WriteI16(ctx context.Context, value int16) error + WriteI32(ctx context.Context, value int32) error + WriteI64(ctx context.Context, value int64) error + WriteDouble(ctx context.Context, value float64) error + WriteString(ctx context.Context, value string) error + WriteBinary(ctx context.Context, value []byte) error + WriteUUID(ctx context.Context, value Tuuid) error + + ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd(ctx context.Context) error + ReadStructBegin(ctx context.Context) (name string, err error) + ReadStructEnd(ctx context.Context) error + ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) + ReadFieldEnd(ctx context.Context) error + ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) + ReadMapEnd(ctx context.Context) error + ReadListBegin(ctx context.Context) (elemType TType, size int, err error) + ReadListEnd(ctx 
context.Context) error + ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) + ReadSetEnd(ctx context.Context) error + ReadBool(ctx context.Context) (value bool, err error) + ReadByte(ctx context.Context) (value int8, err error) + ReadI16(ctx context.Context) (value int16, err error) + ReadI32(ctx context.Context) (value int32, err error) + ReadI64(ctx context.Context) (value int64, err error) + ReadDouble(ctx context.Context) (value float64, err error) + ReadString(ctx context.Context) (value string, err error) + ReadBinary(ctx context.Context) (value []byte, err error) + ReadUUID(ctx context.Context) (value Tuuid, err error) + + Skip(ctx context.Context, fieldType TType) (err error) + Flush(ctx context.Context) (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { + return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. +func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) + } + + switch fieldType { + case BOOL: + _, err = self.ReadBool(ctx) + return + case BYTE: + _, err = self.ReadByte(ctx) + return + case I16: + _, err = self.ReadI16(ctx) + return + case I32: + _, err = self.ReadI32(ctx) + return + case I64: + _, err = self.ReadI64(ctx) + return + case DOUBLE: + _, err = self.ReadDouble(ctx) + return + case STRING: + _, err = self.ReadString(ctx) + return + case UUID: + _, err = self.ReadUUID(ctx) + return + case STRUCT: + if _, err = self.ReadStructBegin(ctx); err != nil { + return err + } + for { + _, typeId, _, err := self.ReadFieldBegin(ctx) + if err != nil { + return err + } + if typeId == STOP { + break + } + err = Skip(ctx, self, typeId, maxDepth-1) + if err != nil { + return err + } + self.ReadFieldEnd(ctx) + } + return self.ReadStructEnd(ctx) + case MAP: + keyType, valueType, size, err := self.ReadMapBegin(ctx) + if err != nil { + return err + } + for range size { + err := Skip(ctx, self, keyType, maxDepth-1) + if err != nil { + return err + } + + err = Skip(ctx, self, valueType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadMapEnd(ctx) + case SET: + elemType, size, err := self.ReadSetBegin(ctx) + if err != nil { + return err + } + for range size { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadSetEnd(ctx) + case LIST: + elemType, size, err := self.ReadListBegin(ctx) + if err != nil { + return err + } + for range size { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd(ctx) + default: + return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType)) + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go new file mode 100644 index 00000000..9dcf4bfd --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/base64" + "errors" +) + +// Thrift Protocol exception +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + err error + msg string +} + +var _ TProtocolException = (*tProtocolException)(nil) + +func (tProtocolException) TExceptionType() TExceptionType { + return TExceptionTypeProtocol +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.msg +} + +func (p *tProtocolException) Error() string { + return p.msg +} + +func (p *tProtocolException) Unwrap() error { + return p.err +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + + if e, ok := err.(TProtocolException); ok { + return e + } + + if errors.As(err, new(base64.CorruptInputError)) { + return NewTProtocolExceptionWithType(INVALID_DATA, err) + } + + return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{ + typeId: errType, + err: err, + msg: err.Error(), + } +} + +func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { + return &tProtocolException{ + typeId: err.TypeId(), + err: err, + msg: prepend + err.Error(), + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go new file mode 100644 index 00000000..c40f796d --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory interface for constructing protocol instances. 
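The factory interface declared just below has a single method, so a custom factory is only a couple of lines. A minimal sketch, where jsonProtocolFactory is an illustrative name and NewTJSONProtocol is assumed to be the constructor this vendored package provides for the TJSONProtocol shown earlier:

```go
// jsonProtocolFactory hands out a fresh TJSONProtocol for each transport.
type jsonProtocolFactory struct{}

func (jsonProtocolFactory) GetProtocol(trans thrift.TTransport) thrift.TProtocol {
	return thrift.NewTJSONProtocol(trans)
}
```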
+type TProtocolFactory interface { + GetProtocol(trans TTransport) TProtocol +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go b/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go new file mode 100644 index 00000000..d884c6ac --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// See https://godoc.org/context#WithValue on why we need the unexported typedefs. +type responseHelperKey struct{} + +// TResponseHelper defines an object with a set of helper functions that can be +// retrieved from the context object passed into server handler functions. +// +// Use GetResponseHelper to retrieve the injected TResponseHelper implementation +// from the context object. +// +// The zero value of TResponseHelper is valid, with all helper functions being +// no-op. +type TResponseHelper struct { + // THeader related functions + *THeaderResponseHelper +} + +// THeaderResponseHelper defines THeader related TResponseHelper functions. +// +// The zero value of *THeaderResponseHelper is valid, with all helper functions +// being no-op. +type THeaderResponseHelper struct { + proto *THeaderProtocol +} + +// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the +// underlying TProtocol. +func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { + if hp, ok := proto.(*THeaderProtocol); ok { + return &THeaderResponseHelper{ + proto: hp, + } + } + return nil +} + +// SetHeader sets a response header. +// +// It's a no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) SetHeader(key, value string) { + if h != nil && h.proto != nil { + h.proto.SetWriteHeader(key, value) + } +} + +// ClearHeaders clears all the response headers previously set. +// +// It's a no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) ClearHeaders() { + if h != nil && h.proto != nil { + h.proto.ClearWriteHeaders() + } +} + +// GetResponseHelper retrieves the TResponseHelper implementation injected into +// the context object. +// +// If no helper was found in the context object, a no-op helper with ok == false +// will be returned. +func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { + if v := ctx.Value(responseHelperKey{}); v != nil { + helper, ok = v.(TResponseHelper) + } + return +}
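Together, GetResponseHelper above and SetResponseHelper below form the server-side pattern: the server injects a helper into the handler context, and handler code retrieves it to set THeader response headers. A minimal handler-side sketch (pingHandler and the header values are illustrative, not part of this file):

```go
func (h *pingHandler) Ping(ctx context.Context) error {
	// The zero value of TResponseHelper is safe: if no helper was injected,
	// SetHeader below is simply a no-op.
	helper, _ := thrift.GetResponseHelper(ctx)
	helper.SetHeader("request-id", "abc123") // only effective over THeader
	return nil
}
```

+// SetResponseHelper injects TResponseHelper into the context object.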
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { + return context.WithValue(ctx, responseHelperKey{}, helper) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go new file mode 100644 index 00000000..83fdf29f --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type RichTransport struct { + TTransport +} + +// Wraps Transport to provide TRichTransport interface +func NewTRichTransport(trans TTransport) *RichTransport { + return &RichTransport{trans} +} + +func (r *RichTransport) ReadByte() (c byte, err error) { + return readByte(r.TTransport) +} + +func (r *RichTransport) WriteByte(c byte) error { + return writeByte(r.TTransport, c) +} + +func (r *RichTransport) WriteString(s string) (n int, err error) { + return r.Write([]byte(s)) +} + +func (r *RichTransport) RemainingBytes() (num_bytes uint64) { + return r.TTransport.RemainingBytes() +} + +func readByte(r io.Reader) (c byte, err error) { + v := [1]byte{0} + n, err := r.Read(v[0:1]) + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { + return v[0], nil + } + if n > 0 && err != nil { + return v[0], err + } + if err != nil { + return 0, err + } + return v[0], nil +} + +func writeByte(w io.Writer, c byte) error { + v := [1]byte{c} + _, err := w.Write(v[0:1]) + return err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go new file mode 100644 index 00000000..53a674e7 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" +) + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(ctx context.Context, p TProtocol) error + Read(ctx context.Context, p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { + t.Transport.Reset() + if r, ok := t.Protocol.(reseter); ok { + r.Reset() + } + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + if err = t.Transport.Flush(ctx); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { + t.Transport.Reset() + if r, ok := t.Protocol.(reseter); ok { + r.Reset() + } + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + + if err = t.Transport.Flush(ctx); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} + +// TSerializerPool is the thread-safe version of TSerializer, it uses resource +// pool of TSerializer under the hood. +// +// It must be initialized with either NewTSerializerPool or +// NewTSerializerPoolSizeFactory. +type TSerializerPool struct { + pool *pool[TSerializer] +} + +// NewTSerializerPool creates a new TSerializerPool. +// +// NewTSerializer can be used as the arg here. +func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { + return &TSerializerPool{ + pool: newPool(f, nil), + } +} + +// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given +// size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. +func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { + return &TSerializerPool{ + pool: newPool(func() *TSerializer { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } + }, nil), + } +} + +func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { + s := t.pool.get() + defer t.pool.put(&s) + return s.WriteString(ctx, msg) +} + +func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { + s := t.pool.get() + defer t.pool.put(&s) + return s.Write(ctx, msg) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/vendor/github.com/apache/thrift/lib/go/thrift/server.go new file mode 100644 index 00000000..f813fa35 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server.go @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TServer interface { + ProcessorFactory() TProcessorFactory + ServerTransport() TServerTransport + InputTransportFactory() TTransportFactory + OutputTransportFactory() TTransportFactory + InputProtocolFactory() TProtocolFactory + OutputProtocolFactory() TProtocolFactory + + // Starts the server + Serve() error + // Stops the server. This is optional on a per-implementation basis. Not + // all servers are required to be cleanly stoppable. + Stop() error +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go new file mode 100644 index 00000000..7dd24ae3 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" + "sync" + "time" +) + +type TServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + + // Protects the interrupted value to make it thread safe. 
+ mu sync.RWMutex + interrupted bool +} + +func NewTServerSocket(listenAddr string) (*TServerSocket, error) { + return NewTServerSocketTimeout(listenAddr, 0) +} + +func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil +} + +// Creates a TServerSocket from a net.Addr +func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { + return &TServerSocket{addr: addr, clientTimeout: clientTimeout} +} + +func (p *TServerSocket) Listen() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return nil + } + l, err := net.Listen(p.addr.Network(), p.addr.String()) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TServerSocket) Accept() (TTransport, error) { + p.mu.RLock() + interrupted := p.interrupted + p.mu.RUnlock() + + if interrupted { + return nil, errTransportInterrupted + } + + p.mu.Lock() + listener := p.listener + p.mu.Unlock() + if listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + + conn, err := listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TServerSocket) Open() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TServerSocket) Close() error { + var err error + p.mu.Lock() + if p.IsListening() { + err = p.listener.Close() + p.listener = nil + } + p.mu.Unlock() + return err +} + +func (p *TServerSocket) Interrupt() error { + p.mu.Lock() + p.interrupted = true + p.mu.Unlock() + p.Close() + + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go new file mode 100644 index 00000000..51c40b64 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Server transport. Object which provides client transports. 
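+//
+// Illustrative accept loop (editorial sketch, not upstream code; the
+// TSimpleServer later in this diff does this for you):
+//
+//	transport, _ := NewTServerSocket("localhost:9090") // address assumed
+//	_ = transport.Listen()
+//	for {
+//		client, err := transport.Accept() // unblocks on Interrupt()
+//		if err != nil {
+//			break
+//		}
+//		go handleClient(client) // handleClient is a hypothetical per-connection handler
+//	}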
+type TServerTransport interface { + Listen() error + Accept() (TTransport, error) + Close() error + + // Optional method implementation. This signals to the server transport + // that it should break out of any accept() or listen() that it is currently + // blocked on. This method, if implemented, MUST be thread safe, as it may + // be called from a different thread context than the other TServerTransport + // methods. + Interrupt() error +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go new file mode 100644 index 00000000..ec12991a --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go @@ -0,0 +1,1352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" +) + +type _ParseContext int + +const ( + _CONTEXT_INVALID _ParseContext = iota + _CONTEXT_IN_TOPLEVEL // 1 + _CONTEXT_IN_LIST_FIRST // 2 + _CONTEXT_IN_LIST // 3 + _CONTEXT_IN_OBJECT_FIRST // 4 + _CONTEXT_IN_OBJECT_NEXT_KEY // 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 +) + +func (p _ParseContext) String() string { + switch p { + case _CONTEXT_IN_TOPLEVEL: + return "TOPLEVEL" + case _CONTEXT_IN_LIST_FIRST: + return "LIST-FIRST" + case _CONTEXT_IN_LIST: + return "LIST" + case _CONTEXT_IN_OBJECT_FIRST: + return "OBJECT-FIRST" + case _CONTEXT_IN_OBJECT_NEXT_KEY: + return "OBJECT-NEXT-KEY" + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + return "OBJECT-NEXT-VALUE" + } + return "UNKNOWN-PARSE-CONTEXT" +} + +type jsonContextStack []_ParseContext + +func (s *jsonContextStack) push(v _ParseContext) { + *s = append(*s, v) +} + +func (s jsonContextStack) peek() (v _ParseContext, ok bool) { + l := len(s) + if l <= 0 { + return + } + return s[l-1], true +} + +func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { + l := len(*s) + if l <= 0 { + return + } + v = (*s)[l-1] + *s = (*s)[0 : l-1] + return v, true +} + +var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) + +// Simple JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
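+//
+// For example (illustrative, editorial note): writing a struct with a string
+// field "name" and an i32 field "id" emits plain JSON such as
+//
+//	{"name":"widget","id":42}
+//
+// Field names become object keys and all thrift type/id metadata is dropped,
+// which is why this output generally cannot be read back into thrift.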
+type TSimpleJSONProtocol struct { + trans TTransport + + cfg *TConfiguration + + parseContextStack jsonContextStack + dumpContext jsonContextStack + + writer *bufio.Writer + reader *bufio.Reader +} + +// Deprecated: Use NewTSimpleJSONProtocolConf instead.: +func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { + return NewTSimpleJSONProtocolConf(t, &TConfiguration{ + noPropagation: true, + }) +} + +func NewTSimpleJSONProtocolConf(t TTransport, conf *TConfiguration) *TSimpleJSONProtocol { + PropagateTConfiguration(t, conf) + v := &TSimpleJSONProtocol{ + trans: t, + cfg: conf, + writer: bufio.NewWriter(t), + reader: bufio.NewReader(t), + } + v.resetContextStack() + return v +} + +// Factory +type TSimpleJSONProtocolFactory struct { + cfg *TConfiguration +} + +func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTSimpleJSONProtocolConf(trans, p.cfg) +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *TSimpleJSONProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Deprecated: Use NewTSimpleJSONProtocolFactoryConf instead. +func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { + return &TSimpleJSONProtocolFactory{ + cfg: &TConfiguration{ + noPropagation: true, + }, + } +} + +func NewTSimpleJSONProtocolFactoryConf(conf *TConfiguration) *TSimpleJSONProtocolFactory { + return &TSimpleJSONProtocolFactory{ + cfg: conf, + } +} + +var ( + JSON_COMMA []byte + JSON_COLON []byte + JSON_LBRACE []byte + JSON_RBRACE []byte + JSON_LBRACKET []byte + JSON_RBRACKET []byte + JSON_QUOTE byte + JSON_QUOTE_BYTES []byte + JSON_NULL []byte + JSON_TRUE []byte + JSON_FALSE []byte + JSON_INFINITY string + JSON_NEGATIVE_INFINITY string + JSON_NAN string + JSON_INFINITY_BYTES []byte + JSON_NEGATIVE_INFINITY_BYTES []byte + JSON_NAN_BYTES []byte +) + +func init() { + JSON_COMMA = []byte{','} + JSON_COLON = []byte{':'} + JSON_LBRACE = []byte{'{'} + JSON_RBRACE = []byte{'}'} + JSON_LBRACKET = []byte{'['} + JSON_RBRACKET = []byte{']'} + JSON_QUOTE = '"' + JSON_QUOTE_BYTES = []byte{'"'} + JSON_NULL = []byte{'n', 'u', 'l', 'l'} + JSON_TRUE = []byte{'t', 'r', 'u', 'e'} + JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} + JSON_INFINITY = "Infinity" + JSON_NEGATIVE_INFINITY = "-Infinity" + JSON_NAN = "NaN" + JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NAN_BYTES = []byte{'N', 'a', 'N'} +} + +func jsonQuote(s string) string { + b, _ := json.Marshal(s) + s1 := string(b) + return s1 +} + +func jsonUnquote(s string) (string, bool) { + s1 := new(string) + err := json.Unmarshal([]byte(s), s1) + return *s1, err == nil +} + +func mismatch(expected, actual string) error { + return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) +} + +func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteString(ctx, name); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(ctx, seqId); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { + if e := 
p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteString(ctx, name); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } + +func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(keyType)); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(valueType)); e != nil { + return e + } + return p.WriteI32(ctx, int32(size)) +} + +func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { + return p.OutputBool(b) +} + +func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) +} + +func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) +} + +func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { + return p.OutputF64(v) +} + +func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { + return p.OutputString(v) +} + +func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) WriteUUID(ctx context.Context, v Tuuid) error { + return p.OutputString(v.String()) +} + +// Reading methods. 
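+//
+// These mirror the writers above: for instance ReadBinary base64-decodes what
+// WriteBinary produced (e.g. "3q0=" -> []byte{0xde, 0xad}).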
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + if name, err = p.ReadString(ctx); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte(ctx) + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(ctx); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { + if err := p.ParsePreValue(); err != nil { + return "", STOP, 0, err + } + b, _ := p.reader.Peek(1) + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return "", STOP, 0, nil + case JSON_QUOTE: + p.reader.ReadByte() + name, err := p.ParseStringBody() + // simplejson is not meant to be read back into thrift + // - see http://wiki.apache.org/thrift/ThriftUsageJava + // - use JSON instead + if err != nil { + return name, STOP, 0, err + } + return name, STOP, -1, p.ParsePostValue() + } + e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) + return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return "", STOP, 0, NewTProtocolException(io.EOF) +} + +func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + bKeyType, e := p.ReadByte(ctx) + keyType = TType(bKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + bValueType, e := p.ReadByte(ctx) + valueType = TType(bValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64(ctx) + if err != nil { + return keyType, valueType, 0, err + } + err = checkSizeForProtocol(int32(iSize), p.cfg) + if err != nil { + return keyType, valueType, 0, err + } + size = int(iSize) + return keyType, valueType, size, err +} + +func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + var value bool + + if err := p.ParsePreValue(); err != nil { + return value, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 { + switch f[0] { + case JSON_TRUE[0]: + b := make([]byte, len(JSON_TRUE)) + _, err := p.reader.Read(b) + if err != nil { + return
false, NewTProtocolException(err) + } + if string(b) == string(JSON_TRUE) { + value = true + } else { + e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_FALSE[0]: + b := make([]byte, len(JSON_FALSE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_FALSE) { + value = false + } else { + e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_NULL[0]: + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_NULL) { + value = false + } else { + e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + default: + e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + return value, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) + return int8(v), err +} + +func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) + return int16(v), err +} + +func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) + return int32(v), err +} + +func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, 
NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadUUID(ctx context.Context) (v Tuuid, err error) { + var s string + s, err = p.ReadString(ctx) + if err != nil { + return v, err + } + v, err = ParseTuuid(s) + return v, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.writer.Flush()) +} + +func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TSimpleJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TSimpleJSONProtocol) OutputPreValue() error { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: + if _, e := p.write(JSON_COMMA); e != nil { + return NewTProtocolException(e) + } + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if _, e := p.write(JSON_COLON); e != nil { + return NewTProtocolException(e) + } + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputPostValue() error { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_LIST) + case _CONTEXT_IN_OBJECT_FIRST: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_KEY: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputBool(value bool) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if value { + v = string(JSON_TRUE) + } else { + v = string(JSON_FALSE) + } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputNull() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_NULL); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputF64(value float64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if math.IsNaN(value) { + v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) + } else if math.IsInf(value, 1) { + v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) + } else if math.IsInf(value, -1) { + v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) + } else { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + v = strconv.FormatFloat(value, 'g', -1, 64) + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = string(JSON_QUOTE) + v + string(JSON_QUOTE) + } + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputI64(value int64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + v := strconv.FormatInt(value, 10) + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + } 
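+	// v now holds the decimal text of value, quoted only when it is being
+	// emitted as an object key, since JSON keys must be strings (42 -> "42").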
+ if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputString(s string) error { + if e := p.OutputPreValue(); e != nil { + return e + } + if e := p.OutputStringData(jsonQuote(s)); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputStringData(s string) error { + _, e := p.write([]byte(s)) + return NewTProtocolException(e) +} + +func (p *TSimpleJSONProtocol) OutputObjectBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) + return nil +} + +func (p *TSimpleJSONProtocol) OutputObjectEnd() error { + if _, e := p.write(JSON_RBRACE); e != nil { + return NewTProtocolException(e) + } + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputListBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) + return nil +} + +func (p *TSimpleJSONProtocol) OutputListEnd() error { + if _, e := p.write(JSON_RBRACKET); e != nil { + return NewTProtocolException(e) + } + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.OutputI64(int64(elemType)); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePreValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } + b, _ := p.reader.Peek(1) + switch cxt { + case _CONTEXT_IN_LIST: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACKET[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + case _CONTEXT_IN_OBJECT_NEXT_KEY: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if len(b) > 0 { + switch b[0] { + case JSON_COLON[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePostValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt, ok := 
p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_LIST) + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) + } + return nil +} + +func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { + for { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return nil + } + switch b[0] { + case ' ', '\r', '\n', '\t': + p.reader.ReadByte() + continue + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + if endsWithoutEscapedQuote(line) { + v, ok := jsonUnquote(string(JSON_QUOTE) + line) + if !ok { + return "", NewTProtocolException(err) + } + return v, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + str := string(JSON_QUOTE) + line + s + v, ok := jsonUnquote(str) + if !ok { + e := fmt.Errorf("Unable to parse as JSON string %s", str) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { + var sb strings.Builder + + for { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + sb.WriteString(line) + if endsWithoutEscapedQuote(line) { + return sb.String(), nil + } + } +} + +func endsWithoutEscapedQuote(s string) bool { + l := len(s) + i := 1 + for ; i < l; i++ { + if s[l-i-1] != '\\' { + break + } + } + return i&0x01 == 1 +} + +func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { + line, err := p.reader.ReadBytes(JSON_QUOTE) + if err != nil { + return line, NewTProtocolException(err) + } + line2 := line[0 : len(line)-1] + l := len(line2) + if (l % 4) != 0 { + pad := 4 - (l % 4) + fill := [...]byte{'=', '=', '='} + line2 = append(line2, fill[:pad]...) 
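+		// line2 is now '='-padded to a multiple of 4 bytes so that
+		// base64.StdEncoding.Decode below accepts it.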
+ l = len(line2) + } + output := make([]byte, base64.StdEncoding.DecodedLen(l)) + n, err := base64.StdEncoding.Decode(output, line2) + return output[0:n], NewTProtocolException(err) +} + +func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value int64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Int64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value float64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Float64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { + if err := p.ParsePreValue(); err != nil { + return false, err + } + var b []byte + b, err := p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) > 0 && b[0] == JSON_LBRACE[0] { + p.reader.ReadByte() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) + return false, nil + } else if p.safePeekContains(JSON_NULL) { + return true, nil + } + e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) + return false, NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TSimpleJSONProtocol) ParseObjectEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt, _ := p.parseContextStack.peek() + if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { + e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACE[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', '}': + // do nothing + } + } + p.parseContextStack.pop() + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { + if e := p.ParsePreValue(); e != nil { + return false, e + } + var b []byte + b, err = p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { + p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) + p.reader.ReadByte() + isNull = false + } else if p.safePeekContains(JSON_NULL) { + isNull = true + } else { + err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) + } + return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + bElemType, _, err := p.ParseI64() + elemType = TType(bElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err := p.ParseI64() + if err != nil { + return elemType, 0, err + } + err = checkSizeForProtocol(int32(nSize), p.cfg) + if err != nil { + 
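+		// the declared element count failed the configured size sanity check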
return elemType, 0, err + } + size = int(nSize) + return elemType, size, nil +} + +func (p *TSimpleJSONProtocol) ParseListEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt, _ := p.parseContextStack.peek() + if cxt != _CONTEXT_IN_LIST { + e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACKET[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): + // do nothing + } + } + p.parseContextStack.pop() + if cxt, ok := p.parseContextStack.peek(); !ok { + return errEmptyJSONContextStack + } else if cxt == _CONTEXT_IN_TOPLEVEL { + return nil + } + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { + cont := true + for cont { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return false, nil + } + switch b[0] { + default: + return false, nil + case JSON_NULL[0]: + cont = false + case ' ', '\n', '\r', '\t': + p.reader.ReadByte() + } + } + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + return true, nil + } + return false, nil +} + +func (p *TSimpleJSONProtocol) readQuoteIfNext() { + b, _ := p.reader.Peek(1) + if len(b) > 0 && b[0] == JSON_QUOTE { + p.reader.ReadByte() + } +} + +func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { + isNull, err := p.readIfNull() + if isNull || err != nil { + return NUMERIC_NULL, err + } + hasDecimalPoint := false + nextCanBeSign := true + hasE := false + MAX_LEN := 40 + buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) + continueFor := true + inQuotes := false + for continueFor { + c, err := p.reader.ReadByte() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return NUMERIC_NULL, NewTProtocolException(err) + } + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + buf.WriteByte(c) + nextCanBeSign = false + case '.': + if hasDecimalPoint { + e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if hasE { + e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasDecimalPoint, nextCanBeSign = true, false + case 'e', 'E': + if hasE { + e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasE, nextCanBeSign = true, true + case '-', '+': + if !nextCanBeSign { + e := fmt.Errorf("Negative sign within number") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + nextCanBeSign = false + case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: + p.reader.UnreadByte() + continueFor = false + case JSON_NAN[0]: + if buf.Len() == 0 { + buffer := make([]byte, len(JSON_NAN)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NAN != string(buffer) { + e := mismatch(JSON_NAN, string(buffer)) + return NUMERIC_NULL, 
NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NAN, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_INFINITY[0]: + if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { + buffer := make([]byte, len(JSON_INFINITY)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_INFINITY != string(buffer) { + e := mismatch(JSON_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return INFINITY, nil + } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { + buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) + buffer[0] = JSON_NEGATIVE_INFINITY[0] + buffer[1] = c + _, e := p.reader.Read(buffer[2:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NEGATIVE_INFINITY != string(buffer) { + e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NEGATIVE_INFINITY, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_QUOTE: + if !inQuotes { + inQuotes = true + } + default: + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + if buf.Len() == 0 { + e := fmt.Errorf("Unable to parse number from empty string ''") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return NewNumericFromJSONString(buf.String(), false), nil +} + +// Safely peeks into the buffer, reading only what is necessary +func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { + for i := range b { + a, _ := p.reader.Peek(i + 1) + if len(a) < (i+1) || a[i] != b[i] { + return false + } + } + return true +} + +// Reset the context stack to its initial state. +func (p *TSimpleJSONProtocol) resetContextStack() { + p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} + p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} +} + +func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { + n, err := p.writer.Write(b) + if err != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + } + return n, err +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + p.cfg = conf +} + +// Reset resets this protocol's internal state. +// +// It's useful when a single protocol instance is reused after errors, to make +// sure the next use will not be in a bad state to begin with. An example is +// when it's used in serializer/deserializer pools. 
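+//
+// A minimal reuse sketch (editorial, assuming a protocol value that may or
+// may not implement Reset):
+//
+//	if r, ok := protocol.(interface{ Reset() }); ok {
+//		r.Reset() // drop any buffered/partial state before reuse
+//	}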
+func (p *TSimpleJSONProtocol) Reset() { + p.resetContextStack() + p.writer.Reset(p.trans) + p.reader.Reset(p.trans) +} + +var ( + _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) + _ TConfigurationSetter = (*TSimpleJSONProtocolFactory)(nil) +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go new file mode 100644 index 00000000..a8634fc5 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go @@ -0,0 +1,414 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "io" + "log/slog" + "net" + "sync" + "sync/atomic" + "time" +) + +// ServerConnectivityCheckInterval defines the ticker interval used by +// connectivity check in thrift compiled TProcessorFunc implementations. +// +// It's defined as a variable instead of constant, so that thrift server +// implementations can change its value to control the behavior. +// +// If it's changed to <=0, the feature will be disabled. +var ServerConnectivityCheckInterval = time.Millisecond * 5 + +// ServerStopTimeout defines max stop wait duration used by +// server stop to avoid hanging too long to wait for all client connections to be closed gracefully. +// +// It's defined as a variable instead of constant, so that thrift server +// implementations can change its value to control the behavior. +// +// If it's set to <=0, the feature will be disabled(by default), and the server will wait for +// for all the client connections to be closed gracefully. +var ServerStopTimeout = time.Duration(0) + +/* + * This is not a typical TSimpleServer as it is not blocked after accept a socket. + * It is more like a TThreadedServer that can handle different connections in different goroutines. + * This will work if golang user implements a conn-pool like thing in client side. 
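+ *
+ * Example wiring (illustrative, editorial note; "processor" stands for a
+ * compiler-generated TProcessor):
+ *
+ *   transport, _ := NewTServerSocket("localhost:9090")
+ *   server := NewTSimpleServer2(processor, transport)
+ *   _ = server.Serve() // blocks; call server.Stop() elsewhere to shut down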
+ */ +type TSimpleServer struct { + closed atomic.Int32 + wg sync.WaitGroup + mu sync.Mutex + stopChan chan struct{} + + processorFactory TProcessorFactory + serverTransport TServerTransport + inputTransportFactory TTransportFactory + outputTransportFactory TTransportFactory + inputProtocolFactory TProtocolFactory + outputProtocolFactory TProtocolFactory + + // Headers to auto forward in THeaderProtocol + forwardHeaders []string + + logContext atomic.Pointer[context.Context] +} + +func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) +} + +func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory4(NewTProcessorFactory(processor), + serverTransport, + transportFactory, + protocolFactory, + ) +} + +func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(NewTProcessorFactory(processor), + serverTransport, + inputTransportFactory, + outputTransportFactory, + inputProtocolFactory, + outputProtocolFactory, + ) +} + +func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + NewTTransportFactory(), + NewTTransportFactory(), + NewTBinaryProtocolFactoryDefault(), + NewTBinaryProtocolFactoryDefault(), + ) +} + +func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + transportFactory, + transportFactory, + protocolFactory, + protocolFactory, + ) +} + +func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return &TSimpleServer{ + processorFactory: processorFactory, + serverTransport: serverTransport, + inputTransportFactory: inputTransportFactory, + outputTransportFactory: outputTransportFactory, + inputProtocolFactory: inputProtocolFactory, + outputProtocolFactory: outputProtocolFactory, + stopChan: make(chan struct{}), + } +} + +func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { + return p.processorFactory +} + +func (p *TSimpleServer) ServerTransport() TServerTransport { + return p.serverTransport +} + +func (p *TSimpleServer) InputTransportFactory() TTransportFactory { + return p.inputTransportFactory +} + +func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { + return p.outputTransportFactory +} + +func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { + return p.inputProtocolFactory +} + +func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { + return p.outputProtocolFactory +} + +func (p *TSimpleServer) Listen() error { + return p.serverTransport.Listen() +} + +// SetForwardHeaders sets the list of header keys that will be auto forwarded +// while using THeaderProtocol. 
+// +// "forward" means that when the server is also a client to other upstream +// thrift servers, the context object user gets in the processor functions will +// have both read and write headers set, with write headers being forwarded. +// Users can always override the write headers by calling SetWriteHeaderList +// before calling thrift client functions. +func (p *TSimpleServer) SetForwardHeaders(headers []string) { + size := len(headers) + if size == 0 { + p.forwardHeaders = nil + return + } + + keys := make([]string, size) + copy(keys, headers) + p.forwardHeaders = keys +} + +// SetLogger sets the logger used by this TSimpleServer. +// +// If no logger was set before Serve is called, a default logger using standard +// log library will be used. +// +// Deprecated: The logging inside TSimpleServer is now done via slog on error +// level, this does nothing now. It will be removed in a future version. +func (p *TSimpleServer) SetLogger(_ Logger) {} + +// SetLogContext sets the context to be used when logging errors inside +// TSimpleServer. +// +// If this is not called before calling Serve, context.Background() will be +// used. +func (p *TSimpleServer) SetLogContext(ctx context.Context) { + p.logContext.Store(&ctx) +} + +func (p *TSimpleServer) innerAccept() (int32, error) { + client, err := p.serverTransport.Accept() + p.mu.Lock() + defer p.mu.Unlock() + closed := p.closed.Load() + if closed != 0 { + return closed, nil + } + if err != nil { + return 0, err + } + if client != nil { + ctx, cancel := context.WithCancel(context.Background()) + p.wg.Add(2) + + go func() { + defer p.wg.Done() + defer cancel() + if err := p.processRequests(client); err != nil { + ctx := p.logContext.Load() + slog.ErrorContext(*ctx, "error processing request", "err", err) + } + }() + + go func() { + defer p.wg.Done() + select { + case <-ctx.Done(): + // client exited, do nothing + case <-p.stopChan: + // TSimpleServer.Close called, close the client connection + client.Close() + } + }() + } + return 0, nil +} + +func (p *TSimpleServer) AcceptLoop() error { + for { + closed, err := p.innerAccept() + if err != nil { + return err + } + if closed != 0 { + return nil + } + } +} + +func (p *TSimpleServer) Serve() error { + p.logContext.CompareAndSwap(nil, Pointer(context.Background())) + + err := p.Listen() + if err != nil { + return err + } + p.AcceptLoop() + return nil +} + +func (p *TSimpleServer) Stop() error { + p.mu.Lock() + defer p.mu.Unlock() + + if !p.closed.CompareAndSwap(0, 1) { + // Already closed + return nil + } + p.serverTransport.Interrupt() + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + p.wg.Wait() + }() + + if ServerStopTimeout > 0 { + timer := time.NewTimer(ServerStopTimeout) + select { + case <-timer.C: + case <-ctx.Done(): + } + close(p.stopChan) + timer.Stop() + } + + <-ctx.Done() + p.stopChan = make(chan struct{}) + return nil +} + +// If err is actually EOF or NOT_OPEN, return nil, otherwise return err as-is. +func treatEOFErrorsAsNil(err error) error { + if err == nil { + return nil + } + if errors.Is(err, io.EOF) { + return nil + } + var te TTransportException + // NOT_OPEN returned by processor.Process is usually caused by client + // abandoning the connection (e.g. client side time out, or just client + // closes connections from the pool because of shutting down). + // Those logs will be very noisy, so suppress those logs as well. 
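+	// errors.As also matches TTransportExceptions wrapped deeper in the error chain.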
+ if errors.As(err, &te) && (te.TypeId() == END_OF_FILE || te.TypeId() == NOT_OPEN) { + return nil + } + return err +} + +func (p *TSimpleServer) processRequests(client TTransport) (err error) { + defer func() { + err = treatEOFErrorsAsNil(err) + }() + + processor := p.processorFactory.GetProcessor(client) + inputTransport, err := p.inputTransportFactory.GetTransport(client) + if err != nil { + return err + } + inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) + var outputTransport TTransport + var outputProtocol TProtocol + + // for THeaderProtocol, we must use the same protocol instance for + // input and output so that the response is in the same dialect that + // the server detected the request was in. + headerProtocol, ok := inputProtocol.(*THeaderProtocol) + if ok { + outputProtocol = inputProtocol + } else { + oTrans, err := p.outputTransportFactory.GetTransport(client) + if err != nil { + return err + } + outputTransport = oTrans + outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) + } + + if inputTransport != nil { + defer inputTransport.Close() + } + if outputTransport != nil { + defer outputTransport.Close() + } + for { + if p.closed.Load() != 0 { + return nil + } + + ctx := SetResponseHelper( + defaultCtx, + TResponseHelper{ + THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), + }, + ) + if headerProtocol != nil { + // We need to call ReadFrame here, otherwise we won't + // get any headers on the AddReadTHeaderToContext call. + // + // ReadFrame is safe to be called multiple times so it + // won't break when it's called again later when we + // actually start to read the message. + if err := headerProtocol.ReadFrame(ctx); err != nil { + return err + } + ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) + ctx = SetWriteHeaderList(ctx, p.forwardHeaders) + } + + ok, err := processor.Process(ctx, inputProtocol, outputProtocol) + if errors.Is(err, ErrAbandonRequest) { + err := client.Close() + if errors.Is(err, net.ErrClosed) { + // In this case, it's kinda expected to get + // net.ErrClosed, treat that as no-error + return nil + } + return err + } + if errors.As(err, new(TTransportException)) && err != nil { + return err + } + var tae TApplicationException + if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { + continue + } + if !ok { + break + } + } + return nil +} + +// ErrAbandonRequest is a special error that server handler implementations can +// return to indicate that the request has been abandoned. +// +// TSimpleServer and compiler generated Process functions will check for this +// error, and close the client connection instead of trying to write the error +// back to the client. +// +// It shall only be used when the server handler implementation know that the +// client already abandoned the request (by checking that the passed in context +// is already canceled, for example). +// +// It also implements the interface defined by errors.Unwrap and always unwrap +// to context.Canceled error. 
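+//
+// Illustrative handler usage (editorial sketch; handler, Fetch, and Row are
+// assumed names):
+//
+//	func (h *handler) Fetch(ctx context.Context, id int64) (*Row, error) {
+//		if ctx.Err() != nil {
+//			// the client is already gone; skip writing a response
+//			return nil, thrift.ErrAbandonRequest
+//		}
+//		// ... normal processing ...
+//	}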
+var ErrAbandonRequest = abandonRequestError{} + +type abandonRequestError struct{} + +func (abandonRequestError) Error() string { + return "request abandoned" +} + +func (abandonRequestError) Unwrap() error { + return context.Canceled +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/slog.go b/vendor/github.com/apache/thrift/lib/go/thrift/slog.go new file mode 100644 index 00000000..22545d84 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/slog.go @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/json" + "fmt" + "strings" +) + +// SlogTStructWrapper is a wrapper used by the compiler to wrap TStruct and +// TException to be better logged by slog. +type SlogTStructWrapper struct { + Type string `json:"type"` + Value TStruct `json:"value"` +} + +var ( + _ fmt.Stringer = SlogTStructWrapper{} + _ json.Marshaler = SlogTStructWrapper{} +) + +func (w SlogTStructWrapper) MarshalJSON() ([]byte, error) { + // Use an alias to avoid infinite recursion + type alias SlogTStructWrapper + return json.Marshal(alias(w)) +} + +func (w SlogTStructWrapper) String() string { + var sb strings.Builder + sb.WriteString(w.Type) + if err := json.NewEncoder(&sb).Encode(w.Value); err != nil { + // Should not happen, but just in case + return fmt.Sprintf("%s: %v", w.Type, w.Value) + } + // json encoder will write an additional \n at the end, get rid of it + return strings.TrimSuffix(sb.String(), "\n") +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go new file mode 100644 index 00000000..2185fb18 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go @@ -0,0 +1,241 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "net" + "time" +) + +type TSocket struct { + conn *socketConn + addr net.Addr + cfg *TConfiguration +} + +// tcpAddr is a naive implementation of net.Addr that does nothing extra. +type tcpAddr string + +var _ net.Addr = tcpAddr("") + +func (ta tcpAddr) Network() string { + return "tcp" +} + +func (ta tcpAddr) String() string { + return string(ta) +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + noPropagation: true, + }), nil +} + +// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSocketConf(hostPort string, conf *TConfiguration) *TSocket { + return NewTSocketFromAddrConf(tcpAddr(hostPort), conf) +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }), nil +} + +// NewTSocketFromAddrConf creates a TSocket from a net.Addr +func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { + return &TSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromAddrConf instead. +func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { + return NewTSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. +func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { + return &TSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromConnConf instead. +func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { + return NewTSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to set connect and socket timeouts. +func (p *TSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
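+//
+// Typical client flow (illustrative, editorial note):
+//
+//	sock := NewTSocketConf("host:9090", &TConfiguration{
+//		ConnectTimeout: time.Second,
+//		SocketTimeout:  time.Second,
+//	})
+//	if err := sock.Open(); err != nil {
+//		// handle connect error
+//	}
+//	defer sock.Close()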
+func (p *TSocket) Open() error { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = createSocketConnFromReturn(net.DialTimeout( + p.addr.Network(), + p.addr.String(), + p.cfg.GetConnectTimeout(), + )); err != nil { + return &tTransportException{ + typeId: NOT_OPEN, + err: err, + msg: err.Error(), + } + } + p.addr = p.conn.RemoteAddr() + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSocket) Close() error { + return p.conn.Close() +} + +//Returns the remote address of the socket. +func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go new file mode 100644 index 00000000..dfd0913a --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "net" + "sync/atomic" +) + +// socketConn is a wrapped net.Conn that tries to do connectivity check. 
+type socketConn struct { + net.Conn + + buffer [1]byte + closed atomic.Int32 +} + +var _ net.Conn = (*socketConn)(nil) + +// createSocketConnFromReturn is a language sugar to help create socketConn from +// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. +func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { + if err != nil { + return nil, err + } + return &socketConn{ + Conn: conn, + }, nil +} + +// wrapSocketConn wraps an existing net.Conn into *socketConn. +func wrapSocketConn(conn net.Conn) *socketConn { + // In case conn is already wrapped, + // return it as-is and avoid double wrapping. + if sc, ok := conn.(*socketConn); ok { + return sc + } + + return &socketConn{ + Conn: conn, + } +} + +// isValid checks whether there's a valid connection. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// It's the same as the previous implementation of TSocket.IsOpen and +// TSSLSocket.IsOpen before we added connectivity check. +func (sc *socketConn) isValid() bool { + return sc != nil && sc.Conn != nil && sc.closed.Load() == 0 +} + +// IsOpen checks whether the connection is open. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// Otherwise, it tries to do a connectivity check and returns the result. +// +// It also has the side effect of resetting the previously set read deadline on +// the socket. As a result, it shouldn't be called between setting read deadline +// and doing actual read. +func (sc *socketConn) IsOpen() bool { + if !sc.isValid() { + return false + } + if err := sc.checkConn(); err != nil { + if !errors.Is(err, net.ErrClosed) { + // The connectivity check failed and the error is not + // that the connection is already closed, we need to + // close the connection explicitly here to avoid + // connection leaks. + sc.Close() + } + return false + } + return true +} + +// Read implements io.Reader. +// +// On Windows, it behaves the same as the underlying net.Conn.Read. +// +// On non-Windows, it treats len(p) == 0 as a connectivity check instead of +// readability check, which means instead of blocking until there's something to +// read (readability check), or always return (0, nil) (the default behavior of +// go's stdlib implementation on non-Windows), it never blocks, and will return +// an error if the connection is lost. +func (sc *socketConn) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, sc.read0() + } + + return sc.Conn.Read(p) +} + +func (sc *socketConn) Close() error { + if !sc.isValid() { + // Already closed + return net.ErrClosed + } + sc.closed.Store(1) + return sc.Conn.Close() +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go new file mode 100644 index 00000000..75ed91dd --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go @@ -0,0 +1,35 @@ +//go:build windows || wasm +// +build windows wasm + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +func (sc *socketConn) read0() error { + // On non-unix platforms, we fallback to the default behavior of reading 0 bytes. + var p []byte + _, err := sc.Conn.Read(p) + return err +} + +func (sc *socketConn) checkConn() error { + // On non-unix platforms, we always return nil for this check. + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go new file mode 100644 index 00000000..ac0dce9e --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go @@ -0,0 +1,84 @@ +//go:build !windows && !wasm +// +build !windows,!wasm + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" + "syscall" + "time" +) + +// We rely on this variable to be the zero time, +// but define it as global variable to avoid repetitive allocations. +// Please DO NOT mutate this variable in any way. +var zeroTime time.Time + +func (sc *socketConn) read0() error { + return sc.checkConn() +} + +func (sc *socketConn) checkConn() error { + syscallConn, ok := sc.Conn.(syscall.Conn) + if !ok { + // No way to check, return nil + return nil + } + + // The reading about to be done here is non-blocking so we don't really + // need a read deadline. We just need to clear the previously set read + // deadline, if any. + sc.Conn.SetReadDeadline(zeroTime) + + rc, err := syscallConn.SyscallConn() + if err != nil { + return err + } + + var n int + + if readErr := rc.Read(func(fd uintptr) bool { + n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + return true + }); readErr != nil { + return readErr + } + + if n > 0 { + // We got something, which means we are good + return nil + } + + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { + // This means the connection is still open but we don't have + // anything to read right now. + return nil + } + + if err != nil { + return err + } + + // At this point, it means the other side already closed the connection. 
+ return io.EOF +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go new file mode 100644 index 00000000..3f05ad93 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + interrupted bool + cfg *tls.Config +} + +func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { + return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) +} + +func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil +} + +func (p *TSSLServerSocket) Listen() error { + if p.IsListening() { + return nil + } + l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TSSLServerSocket) Accept() (TTransport, error) { + if p.interrupted { + return nil, errTransportInterrupted + } + if p.listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + conn, err := p.listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TSSLServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. 
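+// A minimal accept-loop sketch (tlsCfg is an assumed, fully populated
+// *tls.Config, and handle is a hypothetical per-connection handler):
+//
+//	server, err := NewTSSLServerSocket("localhost:9090", tlsCfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	if err := server.Listen(); err != nil {
+//		panic(err)
+//	}
+//	for {
+//		client, err := server.Accept()
+//		if err != nil {
+//			break
+//		}
+//		go handle(client)
+//	}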
+func (p *TSSLServerSocket) Open() error { + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TSSLServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TSSLServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TSSLServerSocket) Interrupt() error { + p.interrupted = true + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go new file mode 100644 index 00000000..d7ba415e --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go @@ -0,0 +1,262 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn *socketConn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. + hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. + addr net.Addr + + cfg *TConfiguration +} + +// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans := thrift.NewTSSLSocketConf("localhost:9090", &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// +// TLSConfig: &tls.Config{ +// // Fill in tls config here. +// } +// }) +func NewTSSLSocketConf(hostPort string, conf *TConfiguration) *TSSLSocket { + if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{ + hostPort: hostPort, + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + TLSConfig: cfg, + + noPropagation: true, + }), nil +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }), nil +} + +// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. 
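+// For example, to dial the remote address of an existing connection again
+// (conn is an assumed net.Conn, cfg an assumed *TConfiguration with a TLS
+// config set):
+//
+//	sock := NewTSSLSocketFromAddrConf(conn.RemoteAddr(), cfg)
+//	if err := sock.Open(); err != nil {
+//		panic(err)
+//	}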
+func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromAddrConf instead. +func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. +func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromConnConf instead. +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to change connect and socket timeouts. +func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. + if p.hostPort != "" { + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + "tcp", + p.hostPort, + p.cfg.GetTLSConfig(), + )); err != nil { + return &tTransportException{ + typeId: NOT_OPEN, + err: err, + msg: err.Error(), + } + } + } else { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + p.addr.Network(), + p.addr.String(), + p.cfg.GetTLSConfig(), + )); err != nil { + return &tTransportException{ + typeId: NOT_OPEN, + err: err, + msg: err.Error(), + } + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. 
+func (p *TSSLSocket) Close() error {
+	return p.conn.Close()
+}
+
+func (p *TSSLSocket) Read(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(true, false)
+	// NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
+	// p.pushDeadline and p.conn.Read could cause the deadline set inside
+	// p.pushDeadline being reset, thus need to be avoided.
+	n, err := p.conn.Read(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TSSLSocket) Write(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(false, true)
+	return p.conn.Write(buf)
+}
+
+func (p *TSSLSocket) Flush(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSSLSocket) Interrupt() error {
+	if !p.conn.isValid() {
+		return nil
+	}
+	return p.conn.Close()
+}
+
+func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+var _ TConfigurationSetter = (*TSSLSocket)(nil)
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf b/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf
new file mode 100644
index 00000000..2ffe850b
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf
@@ -0,0 +1,4 @@
+checks = [
+	"inherit",
+	"-ST1005", # To be consistent with other language libraries we need capitalized error messages.
+]
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go
new file mode 100644
index 00000000..ba2738a8
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+	"io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+	Flush() (err error)
+}
+
+type ContextFlusher interface {
+	Flush(ctx context.Context) (err error)
+}
+
+type ReadSizeProvider interface {
+	RemainingBytes() (num_bytes uint64)
+}
+
+// Encapsulates the I/O layer
+type TTransport interface {
+	io.ReadWriteCloser
+	ContextFlusher
+	ReadSizeProvider
+
+	// Opens the transport for communication
+	Open() error
+
+	// Returns true if the transport is open
+	IsOpen() bool
+}
+
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// This is an "enhanced" transport with extra capabilities. You need to use one of these
+// to construct protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use +// TSocket directly in protocol. +type TRichTransport interface { + io.ReadWriter + io.ByteReader + io.ByteWriter + stringWriter + ContextFlusher + ReadSizeProvider +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go new file mode 100644 index 00000000..a51510ed --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error + msg string +} + +var _ TTransportException = (*tTransportException)(nil) + +func (tTransportException) TExceptionType() TExceptionType { + return TExceptionTypeTransport +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.msg +} + +func (p *tTransportException) Err() error { + return p.err +} + +func (p *tTransportException) Unwrap() error { + return p.err +} + +func (p *tTransportException) Timeout() bool { + return p.typeId == TIMED_OUT || isTimeoutError(p.err) +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{ + typeId: t, + err: errors.New(e), + msg: e, + } +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + te := &tTransportException{ + typeId: UNKNOWN_TRANSPORT_EXCEPTION, + err: e, + msg: e.Error(), + } + + if isTimeoutError(e) { + te.typeId = TIMED_OUT + return te + } + + if errors.Is(e, io.EOF) { + te.typeId = END_OF_FILE + return te + } + + return te +} + +func prependTTransportException(prepend string, e TTransportException) TTransportException { + return &tTransportException{ + typeId: e.TypeId(), + err: e, + msg: prepend + e.Error(), + } +} + +// isTimeoutError returns true when err is an error caused by timeout. +// +// Note that this also includes TTransportException wrapped timeout errors. 
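+// For example, a caller holding an error returned by a transport Read or
+// Write can branch on timeouts like this:
+//
+//	var te TTransportException
+//	if errors.As(err, &te) && te.TypeId() == TIMED_OUT {
+//		// back off and retry
+//	}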
+func isTimeoutError(err error) bool { + var t timeoutable + if errors.As(err, &t) { + return t.Timeout() + } + return false +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go new file mode 100644 index 00000000..c8058079 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory class used to create wrapped instance of Transports. +// This is used primarily in servers, which get Transports from +// a ServerTransport and then may want to mutate them (i.e. create +// a BufferedTransport from the underlying base transport) +type TTransportFactory interface { + GetTransport(trans TTransport) (TTransport, error) +} + +type tTransportFactory struct{} + +// Return a wrapped instance of the base Transport. +func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return trans, nil +} + +func NewTTransportFactory() TTransportFactory { + return &tTransportFactory{} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/vendor/github.com/apache/thrift/lib/go/thrift/type.go new file mode 100644 index 00000000..687557ea --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/type.go @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +// Type constants in the Thrift protocol +type TType byte + +const ( + STOP = 0 + VOID = 1 + BOOL = 2 + BYTE = 3 + I08 = 3 + DOUBLE = 4 + I16 = 6 + I32 = 8 + I64 = 10 + STRING = 11 + UTF7 = 11 + STRUCT = 12 + MAP = 13 + SET = 14 + LIST = 15 + UUID = 16 +) + +var typeNames = map[int]string{ + STOP: "STOP", + VOID: "VOID", + BOOL: "BOOL", + BYTE: "BYTE", + DOUBLE: "DOUBLE", + I16: "I16", + I32: "I32", + I64: "I64", + STRING: "STRING", + STRUCT: "STRUCT", + MAP: "MAP", + SET: "SET", + LIST: "LIST", + UUID: "UUID", +} + +func (p TType) String() string { + if s, ok := typeNames[int(p)]; ok { + return s + } + return "Unknown" +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/uuid.go b/vendor/github.com/apache/thrift/lib/go/thrift/uuid.go new file mode 100644 index 00000000..ab47331c --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/uuid.go @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/hex" + "fmt" +) + +// Tuuid is a minimal implementation of UUID for thrift's read/write operations. +// +// This implementation only covers read/write in various thrift protocols. +// If you need to generate/manipulate/etc. an UUID, +// you likely would need a third party UUID library instead. +// +// This type should be directly cast-able with most popular third party UUID +// libraries. +// For example, assuming you are using +// https://pkg.go.dev/github.com/google/uuid to generate a v4 UUID for an +// optional thrift field: +// +// id, err := uuid.NewRandom() +// if err != nil { +// // TODO: handle errors +// } +// myRequest.Uuid = thrift.Pointer(thrift.Tuuid(id)) +type Tuuid [16]byte + +// String generates the canonical form string for an Tuuid. +// +// This string is suitable for writing with TJSONProtocol. +func (u Tuuid) String() string { + var buf [36]byte + hex.Encode(buf[0:], u[:4]) + buf[8] = '-' + hex.Encode(buf[9:], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + return string(buf[:]) +} + +func hexToDec(b byte) (byte, bool) { + switch { + case b >= '0' && b <= '9': + return b - '0', true + case b >= 'a' && b <= 'f': + return b - 'a' + 10, true + case b >= 'A' && b <= 'F': + return b - 'A' + 10, true + default: + return 0, false + } +} + +func hexToByte(b1, b2 byte) (b byte, ok bool) { + b1, ok = hexToDec(b1) + if !ok { + return 0, ok + } + b2, ok = hexToDec(b2) + if !ok { + return 0, ok + } + return b1<<4 + b2, true +} + +// ParseTuuid parses a canonical form UUID string into Tuuid. 
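+// For example (the UUID below is the RFC 4122 DNS namespace ID, also used in
+// the Must example further down):
+//
+//	u, err := ParseTuuid("6ba7b810-9dad-11d1-80b4-00c04fd430c8")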
+// +// Note that this function only supports case insensitive canonical form +// (8-4-4-4-12/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx), +// and rejects any other forms. +// For a more flexible UUID string parser, +// please use third party UUID libraries. +// +// This function is suitable for reading with TJSONProtocol. +func ParseTuuid(s string) (u Tuuid, err error) { + if len(s) != 36 || s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return u, fmt.Errorf("malformed Tuuid string: %q", s) + } + var ok bool + for i, j := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + u[i], ok = hexToByte(s[j], s[j+1]) + if !ok { + return u, fmt.Errorf("malformed Tuuid string: %q", s) + } + } + return u, nil +} + +// Must is a sugar to be used in places that error handling is impossible (for +// example, global variable declarations) and also errors are not in general +// expected. +// +// This is an example to use Must with ParseTuuid to declare a global special +// uuid: +// +// var NameSpaceDNSUUID = thrift.Must(thrift.ParseTuuid("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) +func Must[T any](v T, err error) T { + if err != nil { + panic(err) + } + return v +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go new file mode 100644 index 00000000..cefe1f99 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go @@ -0,0 +1,137 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. + */ + +package thrift + +import ( + "compress/zlib" + "context" + "io" +) + +// TZlibTransportFactory is a factory for TZlibTransport instances +type TZlibTransportFactory struct { + level int + factory TTransportFactory +} + +// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
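+// A typical wrapping sketch (base is an assumed, already constructed
+// TTransport such as a *TSocket):
+//
+//	ztrans, err := NewTZlibTransport(base, zlib.BestCompression)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer ztrans.Close()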
+type TZlibTransport struct { + reader io.ReadCloser + transport TTransport + writer *zlib.Writer +} + +// GetTransport constructs a new instance of NewTZlibTransport +func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if p.factory != nil { + // wrap other factory + var err error + trans, err = p.factory.GetTransport(trans) + if err != nil { + return nil, err + } + } + return NewTZlibTransport(trans, p.level) +} + +// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory +func NewTZlibTransportFactory(level int) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: nil} +} + +// NewTZlibTransportFactoryWithFactory constructs a new instance of TZlibTransportFactory +// as a wrapper over existing transport factory +func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: factory} +} + +// NewTZlibTransport constructs a new instance of TZlibTransport +func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { + w, err := zlib.NewWriterLevel(trans, level) + if err != nil { + return nil, err + } + + return &TZlibTransport{ + writer: w, + transport: trans, + }, nil +} + +// Close closes the reader and writer (flushing any unwritten data) and closes +// the underlying transport. +func (z *TZlibTransport) Close() error { + if z.reader != nil { + if err := z.reader.Close(); err != nil { + return err + } + } + if err := z.writer.Close(); err != nil { + return err + } + return z.transport.Close() +} + +// Flush flushes the writer and its underlying transport. +func (z *TZlibTransport) Flush(ctx context.Context) error { + if err := z.writer.Flush(); err != nil { + return err + } + return z.transport.Flush(ctx) +} + +// IsOpen returns true if the transport is open +func (z *TZlibTransport) IsOpen() bool { + return z.transport.IsOpen() +} + +// Open opens the transport for communication +func (z *TZlibTransport) Open() error { + return z.transport.Open() +} + +func (z *TZlibTransport) Read(p []byte) (int, error) { + if z.reader == nil { + r, err := zlib.NewReader(z.transport) + if err != nil { + return 0, NewTTransportExceptionFromError(err) + } + z.reader = r + } + + return z.reader.Read(p) +} + +// RemainingBytes returns the size in bytes of the data that is still to be +// read. +func (z *TZlibTransport) RemainingBytes() uint64 { + return z.transport.RemainingBytes() +} + +func (z *TZlibTransport) Write(p []byte) (int, error) { + return z.writer.Write(p) +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(z.transport, conf) +} + +var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/vendor/github.com/coreos/go-oidc/v3/LICENSE b/vendor/github.com/coreos/go-oidc/v3/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/coreos/go-oidc/v3/NOTICE b/vendor/github.com/coreos/go-oidc/v3/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go new file mode 100644 index 00000000..8afa895c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go @@ -0,0 +1,16 @@ +package oidc + +// JOSE asymmetric signing algorithm values as defined by RFC 7518 +// +// see: https://tools.ietf.org/html/rfc7518#section-3.1 +const ( + RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = "ES256" // ECDSA using P-256 and SHA-256 + ES384 = "ES384" // ECDSA using P-384 and SHA-384 + ES512 = "ES512" // ECDSA using P-521 and SHA-512 + PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 +) diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go new file mode 100644 index 00000000..50dad7e0 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -0,0 +1,248 @@ +package oidc + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "io/ioutil" + "net/http" + "sync" + "time" + + jose "github.com/go-jose/go-jose/v3" +) + +// StaticKeySet is a verifier that validates JWT against a static set of public keys. +type StaticKeySet struct { + // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and + // *ecdsa.PublicKey. + PublicKeys []crypto.PublicKey +} + +// VerifySignature compares the signature against a static set of public keys. +func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, err := jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("parsing jwt: %v", err) + } + for _, pub := range s.PublicKeys { + switch pub.(type) { + case *rsa.PublicKey: + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("invalid public key type provided: %T", pub) + } + payload, err := jws.Verify(pub) + if err != nil { + continue + } + return payload, nil + } + return nil, fmt.Errorf("no public keys able to verify jwt") +} + +// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP +// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically +// used by NewProvider using the URLs returned by OpenID Connect discovery, but is +// exposed for providers that don't support discovery or to prevent round trips to the +// discovery URL. +// +// The returned KeySet is a long lived verifier that caches keys based on any +// keys change. Reuse a common remote key set instead of creating new ones as needed. 
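+// A typical setup sketch (the JWKS URL is a placeholder; NewVerifier and
+// Config are the verifier pieces defined elsewhere in this package):
+//
+//	keySet := oidc.NewRemoteKeySet(ctx, "https://example.com/.well-known/jwks.json")
+//	verifier := oidc.NewVerifier("https://example.com", keySet, &oidc.Config{ClientID: clientID})
+//	idToken, err := verifier.Verify(ctx, rawIDToken)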
+func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
+	return newRemoteKeySet(ctx, jwksURL, time.Now)
+}
+
+func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
+	if now == nil {
+		now = time.Now
+	}
+	return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
+}
+
+// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
+// a jwks_uri endpoint.
+type RemoteKeySet struct {
+	jwksURL string
+	ctx     context.Context
+	now     func() time.Time
+
+	// guard all other fields
+	mu sync.RWMutex
+
+	// inflight suppresses parallel execution of updateKeys and allows
+	// multiple goroutines to wait for its result.
+	inflight *inflight
+
+	// A set of cached keys.
+	cachedKeys []jose.JSONWebKey
+}
+
+// inflight is used to wait on some in-flight request from multiple goroutines.
+type inflight struct {
+	doneCh chan struct{}
+
+	keys []jose.JSONWebKey
+	err  error
+}
+
+func newInflight() *inflight {
+	return &inflight{doneCh: make(chan struct{})}
+}
+
+// wait returns a channel that multiple goroutines can receive on. Once it returns
+// a value, the inflight request is done and result() can be inspected.
+func (i *inflight) wait() <-chan struct{} {
+	return i.doneCh
+}
+
+// done can only be called by a single goroutine. It records the result of the
+// inflight request and signals other goroutines that the result is safe to
+// inspect.
+func (i *inflight) done(keys []jose.JSONWebKey, err error) {
+	i.keys = keys
+	i.err = err
+	close(i.doneCh)
+}
+
+// result cannot be called until the wait() channel has returned a value.
+func (i *inflight) result() ([]jose.JSONWebKey, error) {
+	return i.keys, i.err
+}
+
+// parsedJWTKey is a context key that allows common setups to avoid parsing the
+// JWT twice. It holds a *jose.JSONWebSignature value.
+var parsedJWTKey contextKey
+
+// VerifySignature validates a payload against a signature from the jwks_uri.
+//
+// Users MUST NOT call this method directly and should use an IDTokenVerifier
+// instead. This method skips critical validations such as 'alg' values and is
+// only exported to implement the KeySet interface.
+func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+	jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
+	if !ok {
+		var err error
+		jws, err = jose.ParseSigned(jwt)
+		if err != nil {
+			return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+		}
+	}
+	return r.verify(ctx, jws)
+}
+
+func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
+	// We don't support JWTs signed with multiple signatures.
+	keyID := ""
+	for _, sig := range jws.Signatures {
+		keyID = sig.Header.KeyID
+		break
+	}
+
+	keys := r.keysFromCache()
+	for _, key := range keys {
+		if keyID == "" || key.KeyID == keyID {
+			if payload, err := jws.Verify(&key); err == nil {
+				return payload, nil
+			}
+		}
+	}
+
+	// If the kid doesn't match, check for new keys from the remote. This is the
+	// strategy recommended by the spec.
+ // + // https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys + keys, err := r.keysFromRemote(ctx) + if err != nil { + return nil, fmt.Errorf("fetching keys %v", err) + } + + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(&key); err == nil { + return payload, nil + } + } + } + return nil, errors.New("failed to verify id token signature") +} + +func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) { + r.mu.RLock() + defer r.mu.RUnlock() + return r.cachedKeys +} + +// keysFromRemote syncs the key set from the remote set, records the values in the +// cache, and returns the key set. +func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) { + // Need to lock to inspect the inflight request field. + r.mu.Lock() + // If there's not a current inflight request, create one. + if r.inflight == nil { + r.inflight = newInflight() + + // This goroutine has exclusive ownership over the current inflight + // request. It releases the resource by nil'ing the inflight field + // once the goroutine is done. + go func() { + // Sync keys and finish inflight when that's done. + keys, err := r.updateKeys() + + r.inflight.done(keys, err) + + // Lock to update the keys and indicate that there is no longer an + // inflight request. + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + r.cachedKeys = keys + } + + // Free inflight so a different request can run. + r.inflight = nil + }() + } + inflight := r.inflight + r.mu.Unlock() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.wait(): + return inflight.result() + } +} + +func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) { + req, err := http.NewRequest("GET", r.jwksURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: can't create request: %v", err) + } + + resp, err := doRequest(r.ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: get keys failed %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body) + } + + var keySet jose.JSONWebKeySet + err = unmarshalResp(resp, body, &keySet) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body) + } + return keySet.Keys, nil +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go new file mode 100644 index 00000000..ae73eb02 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -0,0 +1,522 @@ +// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package. +package oidc + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io/ioutil" + "mime" + "net/http" + "strings" + "time" + + "golang.org/x/oauth2" +) + +const ( + // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests. + ScopeOpenID = "openid" + + // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting + // OAuth2 refresh tokens. + // + // Support for this scope differs between OpenID Connect providers. For instance + // Google rejects it, favoring appending "access_type=offline" as part of the + // authorization request instead. 
+	//
+	// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
+	ScopeOfflineAccess = "offline_access"
+)
+
+var (
+	errNoAtHash      = errors.New("id token did not have an access token hash")
+	errInvalidAtHash = errors.New("access token hash does not match value in ID token")
+)
+
+type contextKey int
+
+var issuerURLKey contextKey
+
+// ClientContext returns a new Context that carries the provided HTTP client.
+//
+// This method sets the same context key used by the golang.org/x/oauth2 package,
+// so the returned context works for that package too.
+//
+//	myClient := &http.Client{}
+//	ctx := oidc.ClientContext(parentContext, myClient)
+//
+//	// This will use the custom client
+//	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
+//
+func ClientContext(ctx context.Context, client *http.Client) context.Context {
+	return context.WithValue(ctx, oauth2.HTTPClient, client)
+}
+
+// cloneContext copies a context's bag-of-values into a new context that isn't
+// associated with its cancellation. This is used to initialize remote key sets
+// which run in the background and aren't associated with the initial context.
+func cloneContext(ctx context.Context) context.Context {
+	cp := context.Background()
+	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+		cp = ClientContext(cp, c)
+	}
+	return cp
+}
+
+// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
+// by upstream is mismatched with the discovery URL. This is meant for integration
+// with off-spec providers such as Azure.
+//
+//	discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
+//	issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
+//
+//	ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
+//
+//	// Provider will be discovered with the discoveryBaseURL, but use issuerURL
+//	// for future issuer validation.
+//	provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
+//
+// This is insecure because validating the correct issuer is critical for multi-tenant
+// providers. Any overrides here MUST be carefully reviewed.
+func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
+	return context.WithValue(ctx, issuerURLKey, issuerURL)
+}
+
+func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
+	client := http.DefaultClient
+	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+		client = c
+	}
+	return client.Do(req.WithContext(ctx))
+}
+
+// Provider represents an OpenID Connect server's configuration.
+type Provider struct {
+	issuer      string
+	authURL     string
+	tokenURL    string
+	userInfoURL string
+	algorithms  []string
+
+	// Raw claims returned by the server.
+	rawClaims []byte
+
+	remoteKeySet KeySet
+}
+
+type providerJSON struct {
+	Issuer      string   `json:"issuer"`
+	AuthURL     string   `json:"authorization_endpoint"`
+	TokenURL    string   `json:"token_endpoint"`
+	JWKSURL     string   `json:"jwks_uri"`
+	UserInfoURL string   `json:"userinfo_endpoint"`
+	Algorithms  []string `json:"id_token_signing_alg_values_supported"`
+}
+
+// supportedAlgorithms is a list of algorithms explicitly supported by this
+// package. If a provider supports other algorithms, such as HS256 or none,
+// those values won't be passed to the IDTokenVerifier. 
+var supportedAlgorithms = map[string]bool{ + RS256: true, + RS384: true, + RS512: true, + ES256: true, + ES384: true, + ES512: true, + PS256: true, + PS384: true, + PS512: true, +} + +// ProviderConfig allows creating providers when discovery isn't supported. It's +// generally easier to use NewProvider directly. +type ProviderConfig struct { + // IssuerURL is the identity of the provider, and the string it uses to sign + // ID tokens with. For example "https://accounts.google.com". This value MUST + // match ID tokens exactly. + IssuerURL string + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string + // UserInfoURL is the endpoint used by the provider to support the OpenID + // Connect UserInfo flow. + // + // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + UserInfoURL string + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string + + // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign + // ID tokens. If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. + Algorithms []string +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. +func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + algorithms: p.Algorithms, + remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL), + } +} + +// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. +// +// The issuer is the URL identifier for the service. For example: "https://accounts.google.com" +// or "https://login.salesforce.com". +func NewProvider(ctx context.Context, issuer string) (*Provider, error) { + wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" + req, err := http.NewRequest("GET", wellKnown, nil) + if err != nil { + return nil, err + } + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + var p providerJSON + err = unmarshalResp(resp, body, &p) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err) + } + + issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string) + if !skipIssuerValidation { + issuerURL = issuer + } + if p.Issuer != issuerURL && !skipIssuerValidation { + return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer) + } + var algs []string + for _, a := range p.Algorithms { + if supportedAlgorithms[a] { + algs = append(algs, a) + } + } + return &Provider{ + issuer: issuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + algorithms: algs, + rawClaims: body, + remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL), + }, nil +} + +// Claims unmarshals raw fields returned by the server during discovery. 
+//
+//	var claims struct {
+//		ScopesSupported []string `json:"scopes_supported"`
+//		ClaimsSupported []string `json:"claims_supported"`
+//	}
+//
+//	if err := provider.Claims(&claims); err != nil {
+//		// handle unmarshaling error
+//	}
+//
+// For a list of fields defined by the OpenID Connect spec see:
+// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+func (p *Provider) Claims(v interface{}) error {
+	if p.rawClaims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(p.rawClaims, v)
+}
+
+// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
+func (p *Provider) Endpoint() oauth2.Endpoint {
+	return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
+}
+
+// UserInfo represents the OpenID Connect userinfo claims.
+type UserInfo struct {
+	Subject       string `json:"sub"`
+	Profile       string `json:"profile"`
+	Email         string `json:"email"`
+	EmailVerified bool   `json:"email_verified"`
+
+	claims []byte
+}
+
+type userInfoRaw struct {
+	Subject string `json:"sub"`
+	Profile string `json:"profile"`
+	Email   string `json:"email"`
+	// Handle providers that return email_verified as a string
+	// https://forums.aws.amazon.com/thread.jspa?messageID=949441 and
+	// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
+	EmailVerified stringAsBool `json:"email_verified"`
+}
+
+// Claims unmarshals the raw JSON object claims into the provided object.
+func (u *UserInfo) Claims(v interface{}) error {
+	if u.claims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(u.claims, v)
+}
+
+// UserInfo uses the token source to query the provider's user info endpoint.
+func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
+	if p.userInfoURL == "" {
+		return nil, errors.New("oidc: user info endpoint is not supported by this provider")
+	}
+
+	req, err := http.NewRequest("GET", p.userInfoURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: create GET request: %v", err)
+	}
+
+	token, err := tokenSource.Token()
+	if err != nil {
+		return nil, fmt.Errorf("oidc: get access token: %v", err)
+	}
+	token.SetAuthHeader(req)
+
+	resp, err := doRequest(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s: %s", resp.Status, body)
+	}
+
+	ct := resp.Header.Get("Content-Type")
+	mediaType, _, parseErr := mime.ParseMediaType(ct)
+	if parseErr == nil && mediaType == "application/jwt" {
+		payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
+		if err != nil {
+			return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
+		}
+		body = payload
+	}
+
+	var userInfo userInfoRaw
+	if err := json.Unmarshal(body, &userInfo); err != nil {
+		return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
+	}
+	return &UserInfo{
+		Subject:       userInfo.Subject,
+		Profile:       userInfo.Profile,
+		Email:         userInfo.Email,
+		EmailVerified: bool(userInfo.EmailVerified),
+		claims:        body,
+	}, nil
+}
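+
+// Usage sketch (illustrative only; assumes an *oauth2.Token obtained from a
+// prior token exchange — it is not part of the verification flow above):
+//
+//	info, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(token))
+//	if err != nil {
+//		// handle error
+//	}
+//	var claims struct {
+//		Groups []string `json:"groups"`
+//	}
+//	err = info.Claims(&claims)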
+
+// IDToken is an OpenID Connect extension that provides a predictable representation
+// of an authorization event.
+//
+// The ID Token only holds fields OpenID Connect requires. To access additional
+// claims returned by the server, use the Claims method.
+type IDToken struct {
+	// The URL of the server which issued this token. OpenID Connect
+	// requires this value always be identical to the URL used for
+	// initial discovery.
+	//
+	// Note: Because of a known issue with Google Accounts' implementation
+	// this value may differ when using Google.
+	//
+	// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
+	Issuer string
+
+	// The client ID, or set of client IDs, that this token is issued for. For
+	// common uses, this is the client that initialized the auth flow.
+	//
+	// This package ensures the audience contains an expected value.
+	Audience []string
+
+	// A unique string which identifies the end user.
+	Subject string
+
+	// Expiry of the token. This package will not process tokens that have
+	// expired unless that validation is explicitly turned off.
+	Expiry time.Time
+	// When the token was issued by the provider.
+	IssuedAt time.Time
+
+	// Initial nonce provided during the authentication redirect.
+	//
+	// This package does NOT provide verification on the value of this field
+	// and it's the user's responsibility to ensure it contains a valid value.
+	Nonce string
+
+	// at_hash claim, if set in the ID token. Callers can verify an access token
+	// that corresponds to the ID token using the VerifyAccessToken method.
+	AccessTokenHash string
+
+	// signature algorithm used for ID token, needed to compute a verification hash of an
+	// access token
+	sigAlgorithm string
+
+	// Raw payload of the id_token.
+	claims []byte
+
+	// Map of distributed claim names to claim sources
+	distributedClaims map[string]claimSource
+}
+
+// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
+//
+//	idToken, err := idTokenVerifier.Verify(rawIDToken)
+//	if err != nil {
+//		// handle error
+//	}
+//	var claims struct {
+//		Email         string `json:"email"`
+//		EmailVerified bool   `json:"email_verified"`
+//	}
+//	if err := idToken.Claims(&claims); err != nil {
+//		// handle error
+//	}
+//
+func (i *IDToken) Claims(v interface{}) error {
+	if i.claims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(i.claims, v)
+}
+
+// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
+// matches the hash in the ID token. It returns an error if the hashes don't match.
+// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
+// before calling this method. 
See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken +func (i *IDToken) VerifyAccessToken(accessToken string) error { + if i.AccessTokenHash == "" { + return errNoAtHash + } + var h hash.Hash + switch i.sigAlgorithm { + case RS256, ES256, PS256: + h = sha256.New() + case RS384, ES384, PS384: + h = sha512.New384() + case RS512, ES512, PS512: + h = sha512.New() + default: + return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm) + } + h.Write([]byte(accessToken)) // hash documents that Write will never return an error + sum := h.Sum(nil)[:h.Size()/2] + actual := base64.RawURLEncoding.EncodeToString(sum) + if actual != i.AccessTokenHash { + return errInvalidAtHash + } + return nil +} + +type idToken struct { + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience audience `json:"aud"` + Expiry jsonTime `json:"exp"` + IssuedAt jsonTime `json:"iat"` + NotBefore *jsonTime `json:"nbf"` + Nonce string `json:"nonce"` + AtHash string `json:"at_hash"` + ClaimNames map[string]string `json:"_claim_names"` + ClaimSources map[string]claimSource `json:"_claim_sources"` +} + +type claimSource struct { + Endpoint string `json:"endpoint"` + AccessToken string `json:"access_token"` +} + +type stringAsBool bool + +func (sb *stringAsBool) UnmarshalJSON(b []byte) error { + switch string(b) { + case "true", `"true"`: + *sb = true + case "false", `"false"`: + *sb = false + default: + return errors.New("invalid value for boolean") + } + return nil +} + +type audience []string + +func (a *audience) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + *a = audience{s} + return nil + } + var auds []string + if err := json.Unmarshal(b, &auds); err != nil { + return err + } + *a = auds + return nil +} + +type jsonTime time.Time + +func (j *jsonTime) UnmarshalJSON(b []byte) error { + var n json.Number + if err := json.Unmarshal(b, &n); err != nil { + return err + } + var unix int64 + + if t, err := n.Int64(); err == nil { + unix = t + } else { + f, err := n.Float64() + if err != nil { + return err + } + unix = int64(f) + } + *j = jsonTime(time.Unix(unix, 0)) + return nil +} + +func unmarshalResp(r *http.Response, body []byte, v interface{}) error { + err := json.Unmarshal(body, &v) + if err == nil { + return nil + } + ct := r.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/json" { + return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err) + } + return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err) +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go new file mode 100644 index 00000000..ade86157 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -0,0 +1,344 @@ +package oidc + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + jose "github.com/go-jose/go-jose/v3" + "golang.org/x/oauth2" +) + +const ( + issuerGoogleAccounts = "https://accounts.google.com" + issuerGoogleAccountsNoScheme = "accounts.google.com" +) + +// TokenExpiredError indicates that Verify failed because the token was expired. This +// error does NOT indicate that the token is not also invalid for other reasons. Other +// checks might have failed if the expiration check had not failed. 
+type TokenExpiredError struct {
+	// Expiry is the time when the token expired.
+	Expiry time.Time
+}
+
+func (e *TokenExpiredError) Error() string {
+	return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
+}
+
+// KeySet is a set of public JSON Web Keys that can be used to validate the signature
+// of JSON web tokens. This is expected to be backed by a remote key set through
+// provider metadata discovery or an in-memory set of keys delivered out-of-band.
+type KeySet interface {
+	// VerifySignature parses the JSON web token, verifies the signature, and returns
+	// the raw payload. Header and claim fields are validated by other parts of the
+	// package. For example, the KeySet does not need to check values such as signature
+	// algorithm, issuer, and audience since the IDTokenVerifier validates these values
+	// independently.
+	//
+	// If VerifySignature makes HTTP requests to verify the token, it's expected to
+	// use any HTTP client associated with the context through ClientContext.
+	VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
+}
+
+// IDTokenVerifier provides verification for ID Tokens.
+type IDTokenVerifier struct {
+	keySet KeySet
+	config *Config
+	issuer string
+}
+
+// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
+//
+// It's easier to use provider discovery to construct an IDTokenVerifier than creating
+// one directly. This method is intended to be used with providers that don't support
+// metadata discovery, or for avoiding round trips when the key set URL is already known.
+//
+// This constructor can be used to create a verifier directly using the issuer URL and
+// JSON Web Key Set URL without using discovery:
+//
+//	keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
+//	verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+//
+// Or a static key set (e.g. for testing):
+//
+//	keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
+//	verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+//
+func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
+	return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
+}
+
+// Config is the configuration for an IDTokenVerifier.
+type Config struct {
+	// Expected audience of the token. For a majority of the cases this is expected to be
+	// the ID of the client that initialized the login flow. It may occasionally differ if
+	// the provider supports the authorizing party (azp) claim.
+	//
+	// If not provided, users must explicitly set SkipClientIDCheck.
+	ClientID string
+	// If specified, only this set of algorithms may be used to sign the JWT.
+	//
+	// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
+	// defaults to the set of algorithms the provider supports. Otherwise this value
+	// defaults to RS256.
+	SupportedSigningAlgs []string
+
+	// If true, no ClientID check is performed. Must be true if ClientID field is empty.
+	SkipClientIDCheck bool
+	// If true, token expiry is not checked.
+	SkipExpiryCheck bool
+
+	// SkipIssuerCheck is intended for specialized cases where the caller wishes to
+	// defer issuer validation. When enabled, callers MUST independently verify the Token's
+	// Issuer is a known good value.
+	//
+	// Mismatched issuers often indicate client mis-configuration. 
If mismatches are + // unexpected, evaluate if the provided issuer URL is incorrect instead of enabling + // this option. + SkipIssuerCheck bool + + // Time function to check Token expiry. Defaults to time.Now + Now func() time.Time + + // InsecureSkipSignatureCheck causes this package to skip JWT signature validation. + // It's intended for special cases where providers (such as Azure), use the "none" + // algorithm. + // + // This option can only be enabled safely when the ID Token is received directly + // from the provider after the token exchange. + // + // This option MUST NOT be used when receiving an ID Token from sources other + // than the token endpoint. + InsecureSkipSignatureCheck bool +} + +// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs. +func (p *Provider) Verifier(config *Config) *IDTokenVerifier { + if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 { + // Make a copy so we don't modify the config values. + cp := &Config{} + *cp = *config + cp.SupportedSigningAlgs = p.algorithms + config = cp + } + return NewVerifier(p.issuer, p.remoteKeySet, config) +} + +func parseJWT(p string) ([]byte, error) { + parts := strings.Split(p, ".") + if len(parts) < 2 { + return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts)) + } + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err) + } + return payload, nil +} + +func contains(sli []string, ele string) bool { + for _, s := range sli { + if s == ele { + return true + } + } + return false +} + +// Returns the Claims from the distributed JWT token +func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) { + req, err := http.NewRequest("GET", src.Endpoint, nil) + if err != nil { + return nil, fmt.Errorf("malformed request: %v", err) + } + if src.AccessToken != "" { + req.Header.Set("Authorization", "Bearer "+src.AccessToken) + } + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode) + } + + token, err := verifier.Verify(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("malformed response body: %v", err) + } + + return token.claims, nil +} + +// Verify parses a raw ID Token, verifies it's been signed by the provider, performs +// any additional checks depending on the Config, and returns the payload. +// +// Verify does NOT do nonce validation, which is the callers responsibility. +// +// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation +// +// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) +// if err != nil { +// // handle error +// } +// +// // Extract the ID Token from oauth2 token. +// rawIDToken, ok := oauth2Token.Extra("id_token").(string) +// if !ok { +// // handle error +// } +// +// token, err := verifier.Verify(ctx, rawIDToken) +// +func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) { + // Throw out tokens with invalid claims before trying to verify the token. This lets + // us do cheap checks before possibly re-syncing keys. 
+	payload, err := parseJWT(rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+	var token idToken
+	if err := json.Unmarshal(payload, &token); err != nil {
+		return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
+	}
+
+	distributedClaims := make(map[string]claimSource)
+
+	// step through the token to map claim names to claim sources
+	for cn, src := range token.ClaimNames {
+		if src == "" {
+			return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
+		}
+		s, ok := token.ClaimSources[src]
+		if !ok {
+			return nil, fmt.Errorf("oidc: source does not exist")
+		}
+		distributedClaims[cn] = s
+	}
+
+	t := &IDToken{
+		Issuer:            token.Issuer,
+		Subject:           token.Subject,
+		Audience:          []string(token.Audience),
+		Expiry:            time.Time(token.Expiry),
+		IssuedAt:          time.Time(token.IssuedAt),
+		Nonce:             token.Nonce,
+		AccessTokenHash:   token.AtHash,
+		claims:            payload,
+		distributedClaims: distributedClaims,
+	}
+
+	// Check issuer.
+	if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
+		// Google sometimes returns "accounts.google.com" as the issuer claim instead of
+		// the required "https://accounts.google.com". Detect this case and allow it only
+		// for Google.
+		//
+		// We will not add hooks to let other providers go off spec like this.
+		if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
+			return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
+		}
+	}
+
+	// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
+	//
+	// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
+	if !v.config.SkipClientIDCheck {
+		if v.config.ClientID != "" {
+			if !contains(t.Audience, v.config.ClientID) {
+				return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
+			}
+		} else {
+			return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
+		}
+	}
+
+	// If SkipExpiryCheck is false, make sure the token is not expired.
+	if !v.config.SkipExpiryCheck {
+		now := time.Now
+		if v.config.Now != nil {
+			now = v.config.Now
+		}
+		nowTime := now()
+
+		if t.Expiry.Before(nowTime) {
+			return nil, &TokenExpiredError{Expiry: t.Expiry}
+		}
+
+		// If nbf claim is provided in token, ensure that it is indeed in the past.
+		if token.NotBefore != nil {
+			nbfTime := time.Time(*token.NotBefore)
+			// Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew. 
+ // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153 + leeway := 5 * time.Minute + + if nowTime.Add(leeway).Before(nbfTime) { + return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime) + } + } + } + + if v.config.InsecureSkipSignatureCheck { + return t, nil + } + + jws, err := jose.ParseSigned(rawIDToken) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + + switch len(jws.Signatures) { + case 0: + return nil, fmt.Errorf("oidc: id token not signed") + case 1: + default: + return nil, fmt.Errorf("oidc: multiple signatures on id token not supported") + } + + sig := jws.Signatures[0] + supportedSigAlgs := v.config.SupportedSigningAlgs + if len(supportedSigAlgs) == 0 { + supportedSigAlgs = []string{RS256} + } + + if !contains(supportedSigAlgs, sig.Header.Algorithm) { + return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm) + } + + t.sigAlgorithm = sig.Header.Algorithm + + ctx = context.WithValue(ctx, parsedJWTKey, jws) + gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken) + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %v", err) + } + + // Ensure that the payload returned by the square actually matches the payload parsed earlier. + if !bytes.Equal(gotPayload, payload) { + return nil, errors.New("oidc: internal error, payload parsed did not match previous payload") + } + + return t, nil +} + +// Nonce returns an auth code option which requires the ID Token created by the +// OpenID Connect provider to contain the specified nonce. +func Nonce(nonce string) oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("nonce", nonce) +} diff --git a/vendor/github.com/databricks/databricks-sql-go/.gitignore b/vendor/github.com/databricks/databricks-sql-go/.gitignore new file mode 100644 index 00000000..60f5d83c --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/.gitignore @@ -0,0 +1,66 @@ +.vscode + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +test-results/ + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +.env + +# Output of the production build +bin/ + + +# Dependency directories +vendor/ + +*.log +*.bak +_tmp* + +.vscode/ +__debug_bin +.DS_Store diff --git a/vendor/github.com/databricks/databricks-sql-go/.golangci.yml b/vendor/github.com/databricks/databricks-sql-go/.golangci.yml new file mode 100644 index 00000000..b3863f0a --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/.golangci.yml @@ -0,0 +1,62 @@ +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + # - dupl + - errcheck + # - exportloopref + # - funlen + # - gochecknoinits + # - goconst + # - gocritic + # - gocyclo + - 
gofmt + # - goimports + # - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + # - lll + # - misspell + - nakedret + # - noctx + - nolintlint + - staticcheck + - structcheck + # - stylecheck + - typecheck + # - unconvert + # - unparam + - unused + - varcheck + # - whitespace + + # don't enable: + # - asciicheck + # - scopelint + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - interfacer + # - maligned + # - nestif + # - prealloc + # - testpackage + # - revive + # - wsl + +linters-settings: + gosec: + exclude-generated: true + severity: "low" + confidence: "low" + +run: + timeout: 5m diff --git a/vendor/github.com/databricks/databricks-sql-go/CHANGELOG.md b/vendor/github.com/databricks/databricks-sql-go/CHANGELOG.md new file mode 100644 index 00000000..0e9fa2bb --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/CHANGELOG.md @@ -0,0 +1,154 @@ +# Release History + +## v1.6.1 (2024-08-27) + +- Fix CloudFetch "row number N is not contained in any arrow batch" error (databricks/databricks-sql-go#234) + +## v1.6.0 (2024-07-31) + +- Security: Resolve HIGH vulnerability in x/net (CVE-2023-39325) (databricks/databricks-sql-go#233 by @anthonycrobinson) +- Expose `dbsql.ConnOption` type (databricks/databricks-sql-go#202 by @shelldandy) +- Fix a connection leak in PingContext (databricks/databricks-sql-go#240 by @jackyhu-db) + +## v1.5.7 (2024-06-05) + +- Reverted dependencies upgrade because of compatibility issues (databricks/databricks-sql-go#228) +- Add more debug logging (databricks/databricks-sql-go#227) + +## v1.5.6 (2024-05-28) + +- Added connection option `WithSkipTLSHostVerify` (databricks/databricks-sql-go#225 by @jackyhu-db) + +## v1.5.5 (2024-04-16) + +- Fix: handle `nil` values passed as query parameter (databricks/databricks-sql-go#199 by @esdrasbeleza) +- Fix: provide content length on staging file put (databricks/databricks-sql-go#217 by @candiduslynx) +- Fix formatting of *float64 parameters (databricks/databricks-sql-go#215 by @esdrasbeleza) +- Fix: use correct tenant ID for different Azure domains (databricks/databricks-sql-go#210 by @tubiskasaroos) + +## v1.5.4 (2024-04-10) + +- Added OAuth support for GCP (databricks/databricks-sql-go#189 by @rcypher-databricks) +- Staging operations: stream files instead of loading into memory (databricks/databricks-sql-go#197 by @mdibaiee) +- Staging operations: don't panic on REMOVE (databricks/databricks-sql-go#205 by @candiduslynx) +- Fix formatting of Date/Time query parameters (databricks/databricks-sql-go#207 by @candiduslynx) + +## v1.5.3 (2024-01-17) +- Bug fix for ArrowBatchIterator.HasNext(). Incorrectly returned true for result sets with zero rows. + +## v1.5.2 (2023-11-17) +- Added .us domain to inference list for AWS OAuth +- Bug fix for OAuth m2m scopes, updated m2m authenticator to use "all-apis" scope. 
+ +## v1.5.1 (2023-10-17) +- Logging improvements +- Added handling for staging remove + +## v1.5.0 (2023-10-02) +- Named parameter support +- Better handling of bad connection errors and specifying server protocol +- OAuth implementation +- Expose Arrow batches to users +- Add support for staging operations + +## v1.4.0 (2023-08-09) +- Improve error information when query terminates in unexpected state +- Do not override global logger time format +- Enable Transport configuration for http client +- fix: update arrow to v12 +- Updated doc.go for retrieving query id and connection id +- Bug fix issue 147: BUG with reading table that contains copied map +- Allow WithServerHostname to specify protocol + +## v1.3.1 (2023-06-23) + +- bug fix for panic when executing non record producing statements using DB.Query()/DB.QueryExec() + +## v1.3.0 (2023-06-07) + +- allow client provided authenticator +- more robust retry behaviour +- bug fix for null values in complex types + +## v1.2.0 (2023-04-20) + +- Improved error types and info + +## v1.1.0 (2023-03-06) + +- Feat: Support ability to retry on specific failures +- Fetch results in arrow format +- Improve error message and retry behaviour + +## v1.0.1 (2023-01-05) + +Fixing cancel race condition + +## v1.0.0 (2022-12-20) + +- Package doc (doc.go) +- Handle FLOAT values as float32 +- Fix for result.AffectedRows +- Use new ctx when closing operation after cancel +- Set default port to 443 + +## v1.0.0-rc.1 (2022-12-19) + +- Package doc (doc.go) +- Handle FLOAT values as float32 +- Fix for result.AffectedRows +- Add or edit documentation above methods +- Tweaks to readme +- Use new ctx when closing operation after cancel + +## 0.2.2 (2022-12-12) + +- Handle parsing negative years in dates +- fix thread safety issue + +## 0.2.1 (2022-12-05) + +- Don't ignore error in InitThriftClient +- Close optimization for Rows +- Close operation after executing statement +- Minor change to examples +- P&R improvements + +## 0.1.x (Unreleased) + +- Fix thread safety issue in connector + +## 0.2.0 (2022-11-18) + +- Support for DirectResults +- Support for context cancellation and timeout +- Session parameters (e.g.: timezone) +- Thrift Protocol update +- Several logging improvements +- Added better examples. See [workflow](https://github.com/databricks/databricks-sql-go/blob/main/examples/workflow/main.go) +- Added dbsql.NewConnector() function to help initialize DB +- Many other small improvements and bug fixes +- Removed support for client-side query parameterization +- Removed need to start DSN with "databricks://" + +## 0.1.4 (2022-07-30) + +- Fix: Could not fetch rowsets greater than the value of `maxRows` (#18) +- Updated default user agent +- Updated README and CONTRIBUTING + +## 0.1.3 (2022-06-16) + +- Add escaping of string parameters. + +## 0.1.2 (2022-06-10) + +- Fix timeout units to be milliseconds instead of nanos. + +## 0.1.1 (2022-05-19) + +- Fix module name + +## 0.1.0 (2022-05-19) + +- Initial release diff --git a/vendor/github.com/databricks/databricks-sql-go/CONTRIBUTING.md b/vendor/github.com/databricks/databricks-sql-go/CONTRIBUTING.md new file mode 100644 index 00000000..3636621a --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/CONTRIBUTING.md @@ -0,0 +1,79 @@ +# Contributing Guide + +We happily welcome contributions to this package. 
We use [GitHub Issues](https://github.com/databricks/databricks-sql-go/issues) to track community reported issues and [GitHub Pull Requests](https://github.com/databricks/databricks-sql-go/pulls) for accepting changes.
+
+Contributions are licensed on a license-in/license-out basis.
+
+## Communication
+
+Before starting work on a major feature, please reach out to us via GitHub, Slack, email, etc. We will make sure no one else is already working on it and ask you to open a GitHub issue.
+A "major feature" is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior.
+We will use the GitHub issue to discuss the feature and come to agreement.
+This is to prevent your time being wasted, as well as ours.
+The GitHub review process for major features is also important so that organizations with commit access can come to agreement on design.
+If it is appropriate to write a design document, the document must be hosted either in the GitHub tracking issue, or linked to from the issue and hosted in a world-readable location.
+Specifically, if the goal is to add a new extension, please read the extension policy.
+Small patches and bug fixes don't need prior communication.
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from developercertificate.org):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+```
+Signed-off-by: Joe Smith <joe.smith@email.com>
+```
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`.
+
+## Pull Request Process
+
+1. Update the [CHANGELOG](CHANGELOG.md) with details of your changes, if applicable.
+2. Add any appropriate tests.
+3. Make your code or other changes.
+4. Review guidelines such as
+   [How to write the perfect pull request][github-perfect-pr], thanks! 
+ +[github-perfect-pr]: https://blog.github.com/2015-01-21-how-to-write-the-perfect-pull-request/ diff --git a/vendor/github.com/databricks/databricks-sql-go/LICENSE b/vendor/github.com/databricks/databricks-sql-go/LICENSE new file mode 100644 index 00000000..23356f3c --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Databricks, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/databricks/databricks-sql-go/Makefile b/vendor/github.com/databricks/databricks-sql-go/Makefile new file mode 100644 index 00000000..17070f59 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/Makefile @@ -0,0 +1,83 @@ +BINARY = databricks-sql-go +PACKAGE = github.com/databricks/$(BINARY) +VER_PREFIX = $(PACKAGE)/version +PRODUCTION ?= false +DATE = $(shell date "+%Y-%m-%d") +GIT_BRANCH = $(shell git rev-parse --abbrev-ref 2>/dev/null) +GIT_COMMIT = $(shell git rev-parse HEAD 2>/dev/null) +GIT_TAG = $(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi) +VERSIONREL = $(shell if [ -z "`git status --porcelain`" ]; then git rev-parse --short HEAD 2>/dev/null ; else echo "dirty"; fi) +PKGS = $(shell go list ./... | grep -v /vendor) +LDFLAGS = -X $(VER_PREFIX).Version=$(VERSIONREL) -X $(VER_PREFIX).Revision=$(GIT_COMMIT) -X $(VER_PREFIX).Branch=$(GIT_BRANCH) -X $(VER_PREFIX).BuildUser=$(shell whoami) -X $(VER_PREFIX).BuildDate=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") +GOBUILD_ARGS = +GO = CGO_ENABLED=0 go +PLATFORMS = windows linux darwin +os = $(word 1, $@) + +TEST_RESULTS_DIR ?= ./test-results + + +ifneq (${GIT_TAG},) +override DOCKER_TAG = ${GIT_TAG} +override VERSIONREL = ${GIT_TAG} +endif + +ifeq (${PRODUCTION}, true) +LDFLAGS += -w -s -extldflags "-static" +GOBUILD_ARGS += -v +endif + +.PHONY: help +help: ## Show this help. + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +.PHONY: all +all: gen fmt lint test coverage ## format and test everything + +bin/golangci-lint: go.mod go.sum + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./bin v1.48.0 + +bin/gotestsum: go.mod go.sum + @mkdir -p bin/ + $(GO) build -o bin/gotestsum gotest.tools/gotestsum + +.PHONY: tools +tools: bin/golangci-lint bin/gotestsum ## Build the development tools + +.PHONY: fmt +fmt: ## Format the go code. + gofmt -w -s . + +.PHONY: lint +lint: bin/golangci-lint ## Lint the go code to ensure code sanity. + ./bin/golangci-lint run + +.PHONY: test +test: bin/gotestsum ## Run the go unit tests. + @echo "INFO: Running all go unit tests." + CGO_ENABLED=0 ./bin/gotestsum --format pkgname-and-test-fails --junitfile $(TEST_RESULTS_DIR)/unit-tests.xml ./... + +.PHONY: test-race +test-race: + @echo "INFO: Running all go unit tests checking for race conditions." + go test -race + + +.PHONY: coverage +coverage: bin/gotestsum ## Report the unit test code coverage. + @echo "INFO: Generating unit test coverage report." 
+	CGO_ENABLED=0 ./bin/gotestsum --format pkgname-and-test-fails -- -coverprofile=$(TEST_RESULTS_DIR)/coverage.txt -covermode=atomic ./...
+	go tool cover -html=$(TEST_RESULTS_DIR)/coverage.txt -o $(TEST_RESULTS_DIR)/coverage.html
+
+.PHONY: sec
+sec: lint ## Run the snyk vulnerability scans.
+	@echo "INFO: Running go vulnerability scans."
+	snyk test .
+
+.PHONY: build
+build: linux darwin ## Build the multi-arch binaries
+
+.PHONY: $(PLATFORMS)
+$(PLATFORMS):
+	mkdir -p bin
+	GOOS=$(os) GOARCH=amd64 $(GO) build $(GOBUILD_ARGS) -ldflags '$(LDFLAGS)' -o bin/$(BINARY)-$(os)-amd64 .
diff --git a/vendor/github.com/databricks/databricks-sql-go/README.md b/vendor/github.com/databricks/databricks-sql-go/README.md
new file mode 100644
index 00000000..d5522368
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/README.md
@@ -0,0 +1,107 @@
+# Databricks SQL Driver for Go
+
+
+![http://www.apache.org/licenses/LICENSE-2.0.txt](http://img.shields.io/:license-Apache%202-brightgreen.svg)
+
+## Description
+
+This repo contains a Databricks SQL Driver for Go's [database/sql](https://golang.org/pkg/database/sql) package. It can be used to connect to and query Databricks clusters and SQL Warehouses.
+
+## Documentation
+
+See `doc.go` for full documentation or the Databricks documentation for the [SQL Driver for Go](https://docs.databricks.com/dev-tools/go-sql-driver.html).
+
+## Usage
+
+```go
+import (
+	"context"
+	"database/sql"
+	_ "github.com/databricks/databricks-sql-go"
+)
+
+db, err := sql.Open("databricks", "token:********@********.databricks.com:443/sql/1.0/endpoints/********")
+if err != nil {
+	panic(err)
+}
+defer db.Close()
+
+
+rows, err := db.QueryContext(context.Background(), "SELECT 1")
+if err != nil {
+	panic(err)
+}
+defer rows.Close()
+```
+
+Additional usage examples are available [here](https://github.com/databricks/databricks-sql-go/tree/main/examples).
+
+### Connecting with DSN (Data Source Name)
+
+The DSN format is:
+
+```
+token:[your token]@[Workspace hostname]:[Port number][Endpoint HTTP Path]?param=value
+```
+
+You can set the query timeout value by appending a `timeout` query parameter (in seconds), and you can set the maximum number of rows to retrieve per network request by setting the `maxRows` query parameter:
+
+```
+token:[your token]@[Workspace hostname]:[Port number][Endpoint HTTP Path]?timeout=1000&maxRows=1000
+```
+You can turn on Cloud Fetch to increase the performance of extracting large query results by fetching data in parallel via cloud storage (more info [here](https://www.databricks.com/blog/2021/08/11/how-we-achieved-high-bandwidth-connectivity-with-bi-tools.html)). To turn on Cloud Fetch, append `useCloudFetch=true`. You can also set the number of concurrently fetching goroutines by setting the `maxDownloadThreads` query parameter (default is 10):
+```
+token:[your token]@[Workspace hostname]:[Port number][Endpoint HTTP Path]?useCloudFetch=true&maxDownloadThreads=3
+```
+
+### Connecting with a new Connector
+
+You can also connect with a new connector object. For example:
+
+```go
+import (
+	"database/sql"
+	"log"
+
+	dbsql "github.com/databricks/databricks-sql-go"
+)
+
+connector, err := dbsql.NewConnector(
+	dbsql.WithServerHostname("<workspace hostname>"),
+	dbsql.WithPort(443),
+	dbsql.WithHTTPPath("<endpoint HTTP path>"),
+	dbsql.WithAccessToken("<your token>"),
+)
+if err != nil {
+	log.Fatal(err)
+}
+db := sql.OpenDB(connector)
+defer db.Close()
+```
+
+View `doc.go` or `connector.go` to understand all the functional options available when creating a new connector object.
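+
+### Connecting with OAuth M2M (sketch)
+
+The driver also supports OAuth machine-to-machine (M2M) authentication. The snippet below is a minimal sketch rather than canonical usage: it assumes the `WithAuthenticator` connector option together with the `auth/oauth/m2m` package shipped with this module; see `doc.go` for the authoritative list of options.
+
+```go
+import (
+	"database/sql"
+	"log"
+
+	dbsql "github.com/databricks/databricks-sql-go"
+	"github.com/databricks/databricks-sql-go/auth/oauth/m2m"
+)
+
+// Build an authenticator from a service principal's OAuth client credentials.
+authenticator := m2m.NewAuthenticator("<client id>", "<client secret>", "<workspace hostname>")
+
+connector, err := dbsql.NewConnector(
+	dbsql.WithServerHostname("<workspace hostname>"),
+	dbsql.WithHTTPPath("<endpoint HTTP path>"),
+	dbsql.WithAuthenticator(authenticator),
+)
+if err != nil {
+	log.Fatal(err)
+}
+db := sql.OpenDB(connector)
+defer db.Close()
+```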
+
+## Develop
+
+### Lint
+We use `golangci-lint` as the lint tool. If you use VS Code, just add the following settings:
+```json
+{
+    "go.lintTool": "golangci-lint",
+    "go.lintFlags": [
+        "--fast"
+    ]
+}
+```
+### Unit Tests
+
+```bash
+go test
+```
+
+## Issues
+
+If you find any issues, feel free to create an issue or send a pull request directly.
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
+
+## License
+
+[Apache 2.0](https://github.com/databricks/databricks-sql-go/blob/main/LICENSE)
diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/auth.go b/vendor/github.com/databricks/databricks-sql-go/auth/auth.go
new file mode 100644
index 00000000..efbbcb76
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/auth/auth.go
@@ -0,0 +1,40 @@
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+type Authenticator interface {
+	Authenticate(*http.Request) error
+}
+
+type AuthType int
+
+const (
+	AuthTypeUnknown AuthType = iota
+	AuthTypePat
+	AuthTypeOauthU2M
+	AuthTypeOauthM2M
+)
+
+var authTypeNames []string = []string{"Unknown", "Pat", "OauthU2M", "OauthM2M"}
+
+func (at AuthType) String() string {
+	if at >= 0 && int(at) < len(authTypeNames) {
+		return authTypeNames[at]
+	}
+
+	return authTypeNames[0]
+}
+
+func ParseAuthType(typeString string) AuthType {
+	typeString = strings.ToLower(typeString)
+	for i, n := range authTypeNames {
+		if strings.ToLower(n) == typeString {
+			return AuthType(i)
+		}
+	}
+
+	return AuthTypeUnknown
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/noop/noop.go b/vendor/github.com/databricks/databricks-sql-go/auth/noop/noop.go
new file mode 100644
index 00000000..c9d4c7c8
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/auth/noop/noop.go
@@ -0,0 +1,12 @@
+package noop
+
+import (
+	"net/http"
+)
+
+type NoopAuth struct {
+}
+
+func (a *NoopAuth) Authenticate(r *http.Request) error {
+	return nil
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/m2m/m2m.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/m2m/m2m.go
new file mode 100644
index 00000000..fd67a1fd
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/m2m/m2m.go
@@ -0,0 +1,100 @@
+package m2m
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/databricks/databricks-sql-go/auth"
+	"github.com/databricks/databricks-sql-go/auth/oauth"
+	"github.com/rs/zerolog/log"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/clientcredentials"
+)
+
+func NewAuthenticator(clientID, clientSecret, hostName string) auth.Authenticator {
+	return NewAuthenticatorWithScopes(clientID, clientSecret, hostName, []string{})
+}
+
+func NewAuthenticatorWithScopes(clientID, clientSecret, hostName string, scopes []string) auth.Authenticator {
+	scopes = GetScopes(hostName, scopes)
+	return &authClient{
+		clientID:     clientID,
+		clientSecret: clientSecret,
+		hostName:     hostName,
+		scopes:       scopes,
+	}
+}
+
+type authClient struct {
+	clientID     string
+	clientSecret string
+	hostName     string
+	scopes       []string
+	tokenSource  oauth2.TokenSource
+	mx           sync.Mutex
+}
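+
+// Usage sketch (illustrative only): an Authenticator decorates outgoing HTTP
+// requests, so it can also be exercised directly. The request URL below is
+// just an example.
+//
+//	authr := NewAuthenticator("client-id", "client-secret", "myworkspace.cloud.databricks.com")
+//	req, _ := http.NewRequest("GET", "https://myworkspace.cloud.databricks.com/api/2.0/endpoint", nil)
+//	if err := authr.Authenticate(req); err != nil {
+//		// handle error
+//	}
+//	// req now carries an OAuth bearer token in its Authorization header.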
+func (c *authClient) Authenticate(r *http.Request) error { + c.mx.Lock() + defer c.mx.Unlock() + if c.tokenSource != nil { + token, err := c.tokenSource.Token() + if err != nil { + return err + } + token.SetAuthHeader(r) + return nil + } + + config, err := GetConfig(context.Background(), c.hostName, c.clientID, c.clientSecret, c.scopes) + if err != nil { + return fmt.Errorf("unable to generate clientCredentials.Config: %w", err) + } + + c.tokenSource = GetTokenSource(config) + token, err := c.tokenSource.Token() + log.Info().Msgf("token fetched successfully") + if err != nil { + log.Err(err).Msg("failed to get token") + + return err + } + token.SetAuthHeader(r) + + return nil + +} + +func GetTokenSource(config clientcredentials.Config) oauth2.TokenSource { + tokenSource := config.TokenSource(context.Background()) + return tokenSource +} + +func GetConfig(ctx context.Context, issuerURL, clientID, clientSecret string, scopes []string) (clientcredentials.Config, error) { + // Get the endpoint based on the host name + endpoint, err := oauth.GetEndpoint(ctx, issuerURL) + if err != nil { + return clientcredentials.Config{}, fmt.Errorf("could not lookup provider details: %w", err) + } + + config := clientcredentials.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + TokenURL: endpoint.TokenURL, + Scopes: scopes, + } + + return config, nil +} + +func GetScopes(hostName string, scopes []string) []string { + if !oauth.HasScope(scopes, "all-apis") { + scopes = append(scopes, "all-apis") + } + + return scopes +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/oauth.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/oauth.go new file mode 100644 index 00000000..0df9d5c4 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/oauth.go @@ -0,0 +1,148 @@ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "golang.org/x/oauth2" +) + +var azureTenants = map[string]string{ + ".dev.azuredatabricks.net": "62a912ac-b58e-4c1d-89ea-b2dbfc7358fc", + ".staging.azuredatabricks.net": "4a67d088-db5c-48f1-9ff2-0aace800ae68", + ".azuredatabricks.net": "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d", + ".databricks.azure.us": "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d", + ".databricks.azure.cn": "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d", +} + +func GetEndpoint(ctx context.Context, hostName string) (oauth2.Endpoint, error) { + if ctx == nil { + ctx = context.Background() + } + + cloud := InferCloudFromHost(hostName) + + if cloud == Unknown { + return oauth2.Endpoint{}, errors.New("unsupported cloud type") + } + + if cloud == Azure { + authURL := fmt.Sprintf("https://%s/oidc/oauth2/v2.0/authorize", hostName) + tokenURL := fmt.Sprintf("https://%s/oidc/oauth2/v2.0/token", hostName) + return oauth2.Endpoint{AuthURL: authURL, TokenURL: tokenURL}, nil + } + + issuerURL := fmt.Sprintf("https://%s/oidc", hostName) + ctx = oidc.InsecureIssuerURLContext(ctx, issuerURL) + provider, err := oidc.NewProvider(ctx, issuerURL) + if err != nil { + return oauth2.Endpoint{}, err + } + + endpoint := provider.Endpoint() + + return endpoint, err +} + +func GetScopes(hostName string, scopes []string) []string { + for _, s := range []string{oidc.ScopeOfflineAccess} { + if !HasScope(scopes, s) { + scopes = append(scopes, s) + } + } + + cloudType := InferCloudFromHost(hostName) + if cloudType == Azure { + userImpersonationScope := fmt.Sprintf("%s/user_impersonation", azureTenants[GetAzureDnsZone(hostName)]) + if !HasScope(scopes, 
userImpersonationScope) { + scopes = append(scopes, userImpersonationScope) + } + } else { + if !HasScope(scopes, "sql") { + scopes = append(scopes, "sql") + } + } + + return scopes +} + +func HasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + +var databricksAWSDomains []string = []string{ + ".cloud.databricks.com", + ".cloud.databricks.us", + ".dev.databricks.com", +} + +var databricksAzureDomains []string = []string{ + ".azuredatabricks.net", + ".databricks.azure.cn", + ".databricks.azure.us", +} + +var databricksGCPDomains []string = []string{ + ".gcp.databricks.com", +} + +type CloudType int + +const ( + AWS = iota + Azure + GCP + Unknown +) + +func (cl CloudType) String() string { + switch cl { + case AWS: + return "AWS" + case Azure: + return "Azure" + case GCP: + return "GCP" + } + + return "Unknown" +} + +func InferCloudFromHost(hostname string) CloudType { + + for _, d := range databricksAzureDomains { + if strings.Contains(hostname, d) { + return Azure + } + } + + for _, d := range databricksAWSDomains { + if strings.Contains(hostname, d) { + return AWS + } + } + + for _, d := range databricksGCPDomains { + if strings.Contains(hostname, d) { + return GCP + } + } + return Unknown +} + +func GetAzureDnsZone(hostname string) string { + for _, d := range databricksAzureDomains { + if strings.Contains(hostname, d) { + return d + } + } + return "" +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/pkce/pkce.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/pkce/pkce.go new file mode 100644 index 00000000..b9f3368f --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/pkce/pkce.go @@ -0,0 +1,47 @@ +package pkce + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + + "golang.org/x/oauth2" +) + +// Generate generates a new random PKCE code. +func Generate() (Code, error) { return generate(rand.Reader) } + +func generate(rand io.Reader) (Code, error) { + // From https://tools.ietf.org/html/rfc7636#section-4.1: + // code_verifier = high-entropy cryptographic random STRING using the + // unreserved characters [A-Z] / [a-z] / [0-9] / "-" / "." / "_" / "~" + // from Section 2.3 of [RFC3986], with a minimum length of 43 characters + // and a maximum length of 128 characters. + var buf [32]byte + if _, err := io.ReadFull(rand, buf[:]); err != nil { + return "", fmt.Errorf("could not generate PKCE code: %w", err) + } + return Code(hex.EncodeToString(buf[:])), nil +} + +// Code implements the basic options required for RFC 7636: Proof Key for Code Exchange (PKCE). +type Code string + +// Challenge returns the OAuth2 auth code parameter for sending the PKCE code challenge. +func (p *Code) Challenge() oauth2.AuthCodeOption { + b := sha256.Sum256([]byte(*p)) + return oauth2.SetAuthURLParam("code_challenge", base64.RawURLEncoding.EncodeToString(b[:])) +} + +// Method returns the OAuth2 auth code parameter for sending the PKCE code challenge method. +func (p *Code) Method() oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("code_challenge_method", "S256") +} + +// Verifier returns the OAuth2 auth code parameter for sending the PKCE code verifier. 
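+// The verifier must be sent on the token-exchange request so the provider
+// can check it against the challenge sent on the authorization request.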
+func (p *Code) Verifier() oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("code_verifier", string(*p)) +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/authenticator.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/authenticator.go new file mode 100644 index 00000000..ba9d4d72 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/authenticator.go @@ -0,0 +1,274 @@ +package u2m + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "sync" + "time" + + "github.com/pkg/browser" + + "github.com/databricks/databricks-sql-go/auth" + "github.com/databricks/databricks-sql-go/auth/oauth" + "github.com/rs/zerolog/log" + "golang.org/x/oauth2" +) + +const ( + azureClientId = "96eecda7-19ea-49cc-abb5-240097d554f5" + azureRedirectURL = "localhost:8030" + + awsClientId = "databricks-sql-connector" + awsRedirectURL = "localhost:8030" + + gcpClientId = "databricks-sql-connector" + gcpRedirectURL = "localhost:8030" +) + +func NewAuthenticator(hostName string, timeout time.Duration) (auth.Authenticator, error) { + + cloud := oauth.InferCloudFromHost(hostName) + + var clientID, redirectURL string + if cloud == oauth.AWS { + clientID = awsClientId + redirectURL = awsRedirectURL + } else if cloud == oauth.Azure { + clientID = azureClientId + redirectURL = azureRedirectURL + } else if cloud == oauth.GCP { + clientID = gcpClientId + redirectURL = gcpRedirectURL + } else { + return nil, errors.New("unhandled cloud type: " + cloud.String()) + } + + // Get an oauth2 config + config, err := GetConfig(context.Background(), hostName, clientID, "", redirectURL, nil) + if err != nil { + return nil, fmt.Errorf("unable to generate oauth2.Config: %w", err) + } + + tsp, err := GetTokenSourceProvider(context.Background(), config, timeout) + + return &u2mAuthenticator{ + clientID: clientID, + hostName: hostName, + tsp: tsp, + }, err +} + +type u2mAuthenticator struct { + clientID string + hostName string + // scopes []string + tokenSource oauth2.TokenSource + tsp *tokenSourceProvider + mx sync.Mutex +} + +// Auth will start the OAuth Authorization Flow to authenticate the cli client +// using the users credentials in the browser. Compatible with SSO. 
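+// Authenticate attaches a bearer token to the request. On first use it runs
+// the browser-based authorization-code flow (with PKCE) via the token source
+// provider; later calls reuse the cached token source.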
+func (c *u2mAuthenticator) Authenticate(r *http.Request) error { + c.mx.Lock() + defer c.mx.Unlock() + if c.tokenSource != nil { + token, err := c.tokenSource.Token() + if err == nil { + token.SetAuthHeader(r) + return nil + } else if !strings.Contains(err.Error(), "invalid_grant") { + return err + } + + token.SetAuthHeader(r) + return nil + } + + tokenSource, err := c.tsp.GetTokenSource() + if err != nil { + return fmt.Errorf("unable to get token source: %w", err) + } + c.tokenSource = tokenSource + + token, err := tokenSource.Token() + if err != nil { + return fmt.Errorf("unable to get token source: %w", err) + } + + token.SetAuthHeader(r) + + return nil +} + +type authResponse struct { + err string + details string + state string + code string +} + +type tokenSourceProvider struct { + timeout time.Duration + state string + sigintCh chan os.Signal + authDoneCh chan authResponse + redirectURL *url.URL + config oauth2.Config +} + +func (tsp *tokenSourceProvider) GetTokenSource() (oauth2.TokenSource, error) { + state, err := randString(16) + if err != nil { + err = fmt.Errorf("unable to generate random number: %w", err) + return nil, err + } + + challenge, challengeMethod, verifier, err := GetAuthCodeOptions() + if err != nil { + return nil, err + } + + loginURL := tsp.config.AuthCodeURL(state, challenge, challengeMethod) + tsp.state = state + + log.Info().Msgf("listening on %s://%s/", tsp.redirectURL.Scheme, tsp.redirectURL.Host) + listener, err := net.Listen("tcp", tsp.redirectURL.Host) + if err != nil { + return nil, err + } + defer listener.Close() + + srv := &http.Server{ + ReadHeaderTimeout: 3 * time.Second, + WriteTimeout: 30 * time.Second, + } + + defer srv.Close() + + // Start local server to wait for callback + go func() { + err := srv.Serve(listener) + + // in case port is in use + if err != nil && err != http.ErrServerClosed { + tsp.authDoneCh <- authResponse{err: err.Error()} + } + }() + + fmt.Printf("\nOpen URL in Browser to Continue: %s\n\n", loginURL) + err = browser.OpenURL(loginURL) + if err != nil { + fmt.Println("Unable to open browser automatically. 
Please open manually: ", loginURL) + } + + ctx := context.Background() + // Wait for callback to be received, Wait for either the callback to finish, SIGINT to be received or up to 2 minutes + select { + case authResponse := <-tsp.authDoneCh: + if authResponse.err != "" { + return nil, fmt.Errorf("identity provider error: %s: %s", authResponse.err, authResponse.details) + } + token, err := tsp.config.Exchange(ctx, authResponse.code, verifier) + if err != nil { + return nil, fmt.Errorf("failed to exchange token: %w", err) + } + + return tsp.config.TokenSource(ctx, token), nil + + case <-tsp.sigintCh: + return nil, errors.New("interrupted while waiting for auth callback") + + case <-time.After(tsp.timeout): + return nil, errors.New("timed out waiting for response from provider") + } +} + +func (tsp *tokenSourceProvider) ServeHTTP(w http.ResponseWriter, r *http.Request) { + resp := authResponse{ + err: r.URL.Query().Get("error"), + details: r.URL.Query().Get("error_description"), + state: r.URL.Query().Get("state"), + code: r.URL.Query().Get("code"), + } + + // Send the response back to the to cli + defer func() { tsp.authDoneCh <- resp }() + + // Do some checking of the response here to show more relevant content + if resp.err != "" { + log.Error().Msg(resp.err) + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(errorHTML("Identity Provider returned an error: " + resp.err))) + if err != nil { + log.Error().Err(err).Msg("unable to write error response") + } + return + } + if resp.state != tsp.state && r.URL.String() != "/favicon.ico" { + msg := "Authentication state received did not match original request. Please try to login again." + log.Error().Msg(msg) + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(errorHTML(msg))) + if err != nil { + log.Error().Err(err).Msg("unable to write error response") + } + return + } + + _, err := w.Write([]byte(infoHTML("CLI Login Success", "You may close this window anytime now and go back to terminal"))) + if err != nil { + log.Error().Err(err).Msg("unable to write success response") + } +} + +var register sync.Once = sync.Once{} + +func GetTokenSourceProvider(ctx context.Context, config oauth2.Config, timeout time.Duration) (*tokenSourceProvider, error) { + if timeout == 0 { + timeout = 2 * time.Minute + } + + // handle ctrl-c while waiting for the callback + sigintCh := make(chan os.Signal, 1) + signal.Notify(sigintCh, os.Interrupt) + + // receive auth callback response + authDoneCh := make(chan authResponse) + + u, _ := url.Parse(config.RedirectURL) + if u.Path == "" { + u.Path = "/" + } + + tsp := &tokenSourceProvider{ + timeout: timeout, + sigintCh: sigintCh, + authDoneCh: authDoneCh, + redirectURL: u, + config: config, + } + + f := func() { http.Handle(u.Path, tsp) } + register.Do(f) + + return tsp, nil +} + +func randString(nByte int) (string, error) { + b := make([]byte, nByte) + if _, err := io.ReadFull(rand.Reader, b); err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/html_template.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/html_template.go new file mode 100644 index 00000000..69107b3e --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/html_template.go @@ -0,0 +1,56 @@ +package u2m + +import ( + "bytes" + _ "embed" + "html/template" +) + +type SimplePage struct { + Title string + Heading string + Content string + Action ActionLink + Code string +} + 
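+// ActionLink describes an optional link rendered on the result page.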
+type ActionLink struct { + Label string + Link string +} + +var ( + //go:embed templates/simple.html + simpleHtmlPage string +) + +func renderHTML(data SimplePage) (string, error) { + var out bytes.Buffer + tmpl, err := template.New("name").Parse(simpleHtmlPage) + if err != nil { + return "", err + } + err = tmpl.Execute(&out, data) + return out.String(), err +} + +func infoHTML(title, content string) string { + data := SimplePage{ + Title: "Authentication Success", + Heading: title, + Content: content, + } + out, _ := renderHTML(data) + return out +} + +func errorHTML(msg string) string { + data := SimplePage{ + Title: "Authentication Error", + Heading: "Ooops!", + Content: "Sorry, Databricks could not authenticate to your account due to some server errors. Please try it later.", + Code: msg, + } + out, _ := renderHTML(data) + return out +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/templates/simple.html b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/templates/simple.html new file mode 100644 index 00000000..71b0336b --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/templates/simple.html @@ -0,0 +1,100 @@ + + + + + + {{ .Title }} + + + + + + + +
+  <body>
+    <div class="content">
+      <h1>{{ .Heading }}</h1>
+      <p>{{ .Content }}</p>
+      <a href="{{ .Action.Link }}">{{ .Action.Label }}</a>
+      <code>{{ .Code }}</code>
+    </div>
+  </body>
+ + + \ No newline at end of file diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/u2m.go b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/u2m.go new file mode 100644 index 00000000..456e369a --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/oauth/u2m/u2m.go @@ -0,0 +1,51 @@ +package u2m + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/databricks-sql-go/auth/oauth" + "github.com/databricks/databricks-sql-go/auth/oauth/pkce" + "golang.org/x/oauth2" +) + +func GetConfig(ctx context.Context, hostName, clientID, clientSecret, callbackURL string, scopes []string) (oauth2.Config, error) { + // Add necessary scopes for AWS or Azure + scopes = oauth.GetScopes(hostName, scopes) + + // Get the endpoint based on the host name + endpoint, err := oauth.GetEndpoint(ctx, hostName) + if err != nil { + return oauth2.Config{}, fmt.Errorf("could not lookup provider details: %w", err) + } + + if !strings.HasPrefix(callbackURL, "http") { + callbackURL = fmt.Sprintf("http://%s", callbackURL) + } + + config := oauth2.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + Endpoint: endpoint, + RedirectURL: callbackURL, + Scopes: scopes, + } + + return config, nil +} + +func GetAuthCodeOptions() (challenge, challengeMethod, verifier oauth2.AuthCodeOption, err error) { + code, err := pkce.Generate() + if err != nil { + return + } + + return code.Challenge(), code.Method(), code.Verifier(), err +} + +func GetLoginURL(config oauth2.Config, state string, options ...oauth2.AuthCodeOption) string { + loginURL := config.AuthCodeURL(state, options...) + + return loginURL +} diff --git a/vendor/github.com/databricks/databricks-sql-go/auth/pat/pat.go b/vendor/github.com/databricks/databricks-sql-go/auth/pat/pat.go new file mode 100644 index 00000000..73e12bf2 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/auth/pat/pat.go @@ -0,0 +1,20 @@ +package pat + +import ( + "fmt" + "net/http" + + "github.com/pkg/errors" +) + +type PATAuth struct { + AccessToken string +} + +func (a *PATAuth) Authenticate(r *http.Request) error { + if a.AccessToken == "" { + return errors.New("invalid token") + } + r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", a.AccessToken)) + return nil +} diff --git a/vendor/github.com/databricks/databricks-sql-go/connection.go b/vendor/github.com/databricks/databricks-sql-go/connection.go new file mode 100644 index 00000000..b3d04792 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/connection.go @@ -0,0 +1,620 @@ +package dbsql + +import ( + "context" + "database/sql/driver" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/databricks/databricks-sql-go/driverctx" + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/client" + "github.com/databricks/databricks-sql-go/internal/config" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + "github.com/databricks/databricks-sql-go/internal/rows" + "github.com/databricks/databricks-sql-go/internal/sentinel" + "github.com/databricks/databricks-sql-go/logger" + "github.com/pkg/errors" +) + +type conn struct { + id string + cfg *config.Config + client cli_service.TCLIService + session *cli_service.TOpenSessionResp +} + +// Prepare prepares a statement with the query bound to this connection. 
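+// No server-side preparation happens here; the query text is simply held
+// until the statement is executed.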
+func (c *conn) Prepare(query string) (driver.Stmt, error) { + return &stmt{conn: c, query: query}, nil +} + +// PrepareContext prepares a statement with the query bound to this connection. +// Currently, PrepareContext does not use context and is functionally equivalent to Prepare. +func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + return &stmt{conn: c, query: query}, nil +} + +// Close closes the session. +// sql package maintains a free pool of connections and only calls Close when there's a surplus of idle connections. +func (c *conn) Close() error { + log := logger.WithContext(c.id, "", "") + ctx := driverctx.NewContextWithConnId(context.Background(), c.id) + + _, err := c.client.CloseSession(ctx, &cli_service.TCloseSessionReq{ + SessionHandle: c.session.SessionHandle, + }) + + if err != nil { + log.Err(err).Msg("databricks: failed to close connection") + return dbsqlerrint.NewRequestError(ctx, dbsqlerr.ErrCloseConnection, err) + } + return nil +} + +// Not supported in Databricks. +func (c *conn) Begin() (driver.Tx, error) { + return nil, dbsqlerrint.NewDriverError(context.TODO(), dbsqlerr.ErrNotImplemented, nil) +} + +// Not supported in Databricks. +func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + return nil, dbsqlerrint.NewDriverError(context.TODO(), dbsqlerr.ErrNotImplemented, nil) +} + +// Ping attempts to verify that the server is accessible. +// Returns ErrBadConn if ping fails and consequently DB.Ping will remove the conn from the pool. +func (c *conn) Ping(ctx context.Context) error { + ctx = driverctx.NewContextWithConnId(ctx, c.id) + log, _ := client.LoggerAndContext(ctx, nil) + log.Debug().Msg("databricks: pinging") + + ctx1, cancel := context.WithTimeout(ctx, c.cfg.PingTimeout) + defer cancel() + rows, err := c.QueryContext(ctx1, "select 1", nil) + if err != nil { + log.Err(err).Msg("databricks: failed to ping") + return dbsqlerrint.NewBadConnectionError(err) + } + defer rows.Close() + + log.Debug().Msg("databricks: ping successful") + return nil +} + +// ResetSession is called prior to executing a query on the connection. +// The session with this driver does not have any important state to reset before re-use. +func (c *conn) ResetSession(ctx context.Context) error { + return nil +} + +// IsValid signals whether a connection is valid or if it should be discarded. +func (c *conn) IsValid() bool { + return c.session.GetStatus().StatusCode == cli_service.TStatusCode_SUCCESS_STATUS +} + +// ExecContext executes a query that doesn't return rows, such +// as an INSERT or UPDATE. +// +// ExecContext honors the context timeout and return when it is canceled. 
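+// If the context is canceled while the statement is running, the driver
+// attempts to cancel and close the server-side operation before returning.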
+// Statement ExecContext is the same as connection ExecContext +func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + ctx = driverctx.NewContextWithConnId(ctx, c.id) + log, _ := client.LoggerAndContext(ctx, nil) + msg, start := logger.Track("ExecContext") + defer log.Duration(msg, start) + + corrId := driverctx.CorrelationIdFromContext(ctx) + + exStmtResp, opStatusResp, err := c.runQuery(ctx, query, args) + log, ctx = client.LoggerAndContext(ctx, exStmtResp) + stagingErr := c.execStagingOperation(exStmtResp, ctx) + + if exStmtResp != nil && exStmtResp.OperationHandle != nil { + // since we have an operation handle we can close the operation if necessary + alreadyClosed := exStmtResp.DirectResults != nil && exStmtResp.DirectResults.CloseOperation != nil + newCtx := driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), c.id), corrId) + if !alreadyClosed && (opStatusResp == nil || opStatusResp.GetOperationState() != cli_service.TOperationState_CLOSED_STATE) { + _, err1 := c.client.CloseOperation(newCtx, &cli_service.TCloseOperationReq{ + OperationHandle: exStmtResp.OperationHandle, + }) + if err1 != nil { + log.Err(err1).Msg("databricks: failed to close operation after executing statement") + } + } + } + + if err != nil { + log.Err(err).Msgf("databricks: failed to execute query: query %s", query) + return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, err, opStatusResp) + } + + if stagingErr != nil { + log.Err(stagingErr).Msgf("databricks: failed to execute query: query %s", query) + return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, err, opStatusResp) + } + + res := result{AffectedRows: opStatusResp.GetNumModifiedRows()} + + return &res, nil +} + +// QueryContext executes a query that may return rows, such as a +// SELECT. +// +// QueryContext honors the context timeout and return when it is canceled. +// Statement QueryContext is the same as connection QueryContext +func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + ctx = driverctx.NewContextWithConnId(ctx, c.id) + log, _ := client.LoggerAndContext(ctx, nil) + msg, start := log.Track("QueryContext") + + // first we try to get the results synchronously. + // at any point in time that the context is done we must cancel and return + exStmtResp, opStatusResp, err := c.runQuery(ctx, query, args) + log, ctx = client.LoggerAndContext(ctx, exStmtResp) + defer log.Duration(msg, start) + + if err != nil { + log.Err(err).Msg("databricks: failed to run query") // To log query we need to redact credentials + return nil, dbsqlerrint.NewExecutionError(ctx, dbsqlerr.ErrQueryExecution, err, opStatusResp) + } + + corrId := driverctx.CorrelationIdFromContext(ctx) + rows, err := rows.NewRows(c.id, corrId, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults) + + return rows, err + +} + +func (c *conn) runQuery(ctx context.Context, query string, args []driver.NamedValue) (*cli_service.TExecuteStatementResp, *cli_service.TGetOperationStatusResp, error) { + // first we try to get the results synchronously. 
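+	// (using direct results when available); if the operation is still running
+	// we poll it until it reaches a terminal state.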
+ // at any point in time that the context is done we must cancel and return + exStmtResp, err := c.executeStatement(ctx, query, args) + var log *logger.DBSQLLogger + log, ctx = client.LoggerAndContext(ctx, exStmtResp) + + if err != nil { + return exStmtResp, nil, err + } + + opHandle := exStmtResp.OperationHandle + + if exStmtResp.DirectResults != nil { + opStatus := exStmtResp.DirectResults.GetOperationStatus() + + switch opStatus.GetOperationState() { + // terminal states + // good + case cli_service.TOperationState_FINISHED_STATE: + return exStmtResp, opStatus, nil + // bad + case cli_service.TOperationState_CANCELED_STATE, + cli_service.TOperationState_CLOSED_STATE, + cli_service.TOperationState_ERROR_STATE, + cli_service.TOperationState_TIMEDOUT_STATE: + logBadQueryState(log, opStatus) + return exStmtResp, opStatus, unexpectedOperationState(opStatus) + // live states + case cli_service.TOperationState_INITIALIZED_STATE, + cli_service.TOperationState_PENDING_STATE, + cli_service.TOperationState_RUNNING_STATE: + statusResp, err := c.pollOperation(ctx, opHandle) + if err != nil { + return exStmtResp, statusResp, err + } + switch statusResp.GetOperationState() { + // terminal states + // good + case cli_service.TOperationState_FINISHED_STATE: + return exStmtResp, statusResp, nil + // bad + case cli_service.TOperationState_CANCELED_STATE, + cli_service.TOperationState_CLOSED_STATE, + cli_service.TOperationState_ERROR_STATE, + cli_service.TOperationState_TIMEDOUT_STATE: + logBadQueryState(log, statusResp) + return exStmtResp, statusResp, unexpectedOperationState(statusResp) + // live states + default: + logBadQueryState(log, statusResp) + return exStmtResp, statusResp, invalidOperationState(ctx, statusResp) + } + // weird states + default: + logBadQueryState(log, opStatus) + return exStmtResp, opStatus, invalidOperationState(ctx, opStatus) + } + + } else { + statusResp, err := c.pollOperation(ctx, opHandle) + if err != nil { + return exStmtResp, statusResp, err + } + switch statusResp.GetOperationState() { + // terminal states + // good + case cli_service.TOperationState_FINISHED_STATE: + return exStmtResp, statusResp, nil + // bad + case cli_service.TOperationState_CANCELED_STATE, + cli_service.TOperationState_CLOSED_STATE, + cli_service.TOperationState_ERROR_STATE, + cli_service.TOperationState_TIMEDOUT_STATE: + logBadQueryState(log, statusResp) + return exStmtResp, statusResp, unexpectedOperationState(statusResp) + // live states + default: + logBadQueryState(log, statusResp) + return exStmtResp, statusResp, invalidOperationState(ctx, statusResp) + } + } +} + +func logBadQueryState(log *logger.DBSQLLogger, opStatus *cli_service.TGetOperationStatusResp) { + log.Error().Msgf("databricks: query state: %s", opStatus.GetOperationState()) + log.Error().Msg(opStatus.GetDisplayMessage()) + log.Debug().Msg(opStatus.GetDiagnosticInfo()) +} + +func unexpectedOperationState(opStatus *cli_service.TGetOperationStatusResp) error { + return errors.WithMessage(errors.New(opStatus.GetDisplayMessage()), dbsqlerr.ErrUnexpectedOperationState(opStatus.GetOperationState().String())) +} + +func invalidOperationState(ctx context.Context, opStatus *cli_service.TGetOperationStatusResp) error { + return dbsqlerrint.NewDriverError(ctx, dbsqlerr.ErrInvalidOperationState(opStatus.GetOperationState().String()), nil) +} + +func (c *conn) executeStatement(ctx context.Context, query string, args []driver.NamedValue) (*cli_service.TExecuteStatementResp, error) { + ctx = driverctx.NewContextWithConnId(ctx, c.id) + + req := 
cli_service.TExecuteStatementReq{ + SessionHandle: c.session.SessionHandle, + Statement: query, + RunAsync: true, + QueryTimeout: int64(c.cfg.QueryTimeout / time.Second), + GetDirectResults: &cli_service.TSparkGetDirectResults{ + MaxRows: int64(c.cfg.MaxRows), + }, + CanDecompressLZ4Result_: &c.cfg.UseLz4Compression, + Parameters: convertNamedValuesToSparkParams(args), + } + + if c.cfg.UseArrowBatches { + req.CanReadArrowResult_ = &c.cfg.UseArrowBatches + req.UseArrowNativeTypes = &cli_service.TSparkArrowTypes{ + DecimalAsArrow: &c.cfg.UseArrowNativeDecimal, + TimestampAsArrow: &c.cfg.UseArrowNativeTimestamp, + ComplexTypesAsArrow: &c.cfg.UseArrowNativeComplexTypes, + IntervalTypesAsArrow: &c.cfg.UseArrowNativeIntervalTypes, + } + } + + if c.cfg.UseCloudFetch { + req.CanDownloadResult_ = &c.cfg.UseCloudFetch + } + + resp, err := c.client.ExecuteStatement(ctx, &req) + var log *logger.DBSQLLogger + log, ctx = client.LoggerAndContext(ctx, resp) + + var shouldCancel = func(resp *cli_service.TExecuteStatementResp) bool { + if resp == nil { + return false + } + hasHandle := resp.OperationHandle != nil + isOpen := resp.DirectResults == nil || resp.DirectResults.CloseOperation == nil + return hasHandle && isOpen + } + + select { + default: + case <-ctx.Done(): + newCtx := driverctx.NewContextFromBackground(ctx) + // in case context is done, we need to cancel the operation if necessary + if err == nil && shouldCancel(resp) { + log.Debug().Msg("databricks: canceling query") + _, err1 := c.client.CancelOperation(newCtx, &cli_service.TCancelOperationReq{ + OperationHandle: resp.GetOperationHandle(), + }) + + if err1 != nil { + log.Err(err1).Msgf("databricks: cancel failed") + } else { + log.Debug().Msgf("databricks: cancel success") + } + } else { + log.Debug().Msg("databricks: query did not need cancellation") + } + return nil, ctx.Err() + } + + return resp, err +} + +func (c *conn) pollOperation(ctx context.Context, opHandle *cli_service.TOperationHandle) (*cli_service.TGetOperationStatusResp, error) { + corrId := driverctx.CorrelationIdFromContext(ctx) + log := logger.WithContext(c.id, corrId, client.SprintGuid(opHandle.OperationId.GUID)) + var statusResp *cli_service.TGetOperationStatusResp + ctx = driverctx.NewContextWithConnId(ctx, c.id) + newCtx := driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), c.id), corrId) + pollSentinel := sentinel.Sentinel{ + OnDoneFn: func(statusResp any) (any, error) { + return statusResp, nil + }, + StatusFn: func() (sentinel.Done, any, error) { + var err error + log.Debug().Msg("databricks: polling status") + statusResp, err = c.client.GetOperationStatus(newCtx, &cli_service.TGetOperationStatusReq{ + OperationHandle: opHandle, + }) + + if statusResp != nil && statusResp.OperationState != nil { + log.Debug().Msgf("databricks: status %s", statusResp.GetOperationState().String()) + } + return func() bool { + if err != nil { + return true + } + switch statusResp.GetOperationState() { + case cli_service.TOperationState_INITIALIZED_STATE, + cli_service.TOperationState_PENDING_STATE, + cli_service.TOperationState_RUNNING_STATE: + return false + default: + log.Debug().Msg("databricks: polling done") + return true + } + }, statusResp, err + }, + OnCancelFn: func() (any, error) { + log.Debug().Msg("databricks: sentinel canceling query") + ret, err := c.client.CancelOperation(newCtx, &cli_service.TCancelOperationReq{ + OperationHandle: opHandle, + }) + return ret, err + }, + } + status, resp, err := pollSentinel.Watch(ctx, 
c.cfg.PollInterval, 0) + if err != nil { + log.Err(err).Msg("error polling operation status") + if status == sentinel.WatchTimeout { + err = dbsqlerrint.NewRequestError(ctx, dbsqlerr.ErrSentinelTimeout, err) + } + return nil, err + } + + statusResp, ok := resp.(*cli_service.TGetOperationStatusResp) + if !ok { + return nil, dbsqlerrint.NewDriverError(ctx, dbsqlerr.ErrReadQueryStatus, nil) + } + return statusResp, nil +} + +func (c *conn) CheckNamedValue(nv *driver.NamedValue) error { + var err error + if parameter, ok := nv.Value.(Parameter); ok { + nv.Name = parameter.Name + parameter.Value, err = driver.DefaultParameterConverter.ConvertValue(parameter.Value) + return err + } + + nv.Value, err = driver.DefaultParameterConverter.ConvertValue(nv.Value) + return err +} + +var _ driver.Conn = (*conn)(nil) +var _ driver.Pinger = (*conn)(nil) +var _ driver.SessionResetter = (*conn)(nil) +var _ driver.Validator = (*conn)(nil) +var _ driver.ExecerContext = (*conn)(nil) +var _ driver.QueryerContext = (*conn)(nil) +var _ driver.ConnPrepareContext = (*conn)(nil) +var _ driver.ConnBeginTx = (*conn)(nil) +var _ driver.NamedValueChecker = (*conn)(nil) + +func Succeeded(response *http.Response) bool { + if response.StatusCode == 200 || response.StatusCode == 201 || response.StatusCode == 202 || response.StatusCode == 204 { + return true + } + return false +} + +func (c *conn) handleStagingPut(ctx context.Context, presignedUrl string, headers map[string]string, localFile string) dbsqlerr.DBError { + if localFile == "" { + return dbsqlerrint.NewDriverError(ctx, "cannot perform PUT without specifying a local_file", nil) + } + client := &http.Client{} + + dat, err := os.Open(localFile) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error reading local file", err) + } + defer dat.Close() + + info, err := dat.Stat() + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error reading local file info", err) + } + + req, _ := http.NewRequest("PUT", presignedUrl, dat) + req.ContentLength = info.Size() // backend actually requires content length to be known + + for k, v := range headers { + req.Header.Set(k, v) + } + res, err := client.Do(req) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error sending http request", err) + } + defer res.Body.Close() + content, err := io.ReadAll(res.Body) + + if err != nil || !Succeeded(res) { + return dbsqlerrint.NewDriverError(ctx, fmt.Sprintf("staging operation over HTTP was unsuccessful: %d-%s", res.StatusCode, content), nil) + } + return nil + +} + +func (c *conn) handleStagingGet(ctx context.Context, presignedUrl string, headers map[string]string, localFile string) dbsqlerr.DBError { + if localFile == "" { + return dbsqlerrint.NewDriverError(ctx, "cannot perform GET without specifying a local_file", nil) + } + client := &http.Client{} + req, _ := http.NewRequest("GET", presignedUrl, nil) + + for k, v := range headers { + req.Header.Set(k, v) + } + res, err := client.Do(req) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error sending http request", err) + } + defer res.Body.Close() + content, err := io.ReadAll(res.Body) + + if err != nil || !Succeeded(res) { + return dbsqlerrint.NewDriverError(ctx, fmt.Sprintf("staging operation over HTTP was unsuccessful: %d-%s", res.StatusCode, content), nil) + } + + err = os.WriteFile(localFile, content, 0644) //nolint:gosec + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error writing local file", err) + } + return nil +} + +func (c *conn) handleStagingRemove(ctx context.Context, 
presignedUrl string, headers map[string]string) dbsqlerr.DBError { + client := &http.Client{} + req, _ := http.NewRequest("DELETE", presignedUrl, nil) + for k, v := range headers { + req.Header.Set(k, v) + } + res, err := client.Do(req) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error sending http request", err) + } + defer res.Body.Close() + content, err := io.ReadAll(res.Body) + + if err != nil || !Succeeded(res) { + return dbsqlerrint.NewDriverError(ctx, fmt.Sprintf("staging operation over HTTP was unsuccessful: %d-%s, nil", res.StatusCode, content), nil) + } + + return nil +} + +func localPathIsAllowed(stagingAllowedLocalPaths []string, localFile string) bool { + for i := range stagingAllowedLocalPaths { + // Convert both filepaths to absolute paths to avoid potential issues. + // + path, err := filepath.Abs(stagingAllowedLocalPaths[i]) + if err != nil { + return false + } + localFile, err := filepath.Abs(localFile) + if err != nil { + return false + } + relativePath, err := filepath.Rel(path, localFile) + if err != nil { + return false + } + if !strings.Contains(relativePath, "../") { + return true + } + } + return false +} + +func (c *conn) execStagingOperation( + exStmtResp *cli_service.TExecuteStatementResp, + ctx context.Context) dbsqlerr.DBError { + + if exStmtResp == nil || exStmtResp.OperationHandle == nil { + return nil + } + + corrId := driverctx.CorrelationIdFromContext(ctx) + var row driver.Rows + var err error + + var isStagingOperation bool + if exStmtResp.DirectResults != nil && exStmtResp.DirectResults.ResultSetMetadata != nil && exStmtResp.DirectResults.ResultSetMetadata.IsStagingOperation != nil { + isStagingOperation = *exStmtResp.DirectResults.ResultSetMetadata.IsStagingOperation + } else { + req := cli_service.TGetResultSetMetadataReq{ + OperationHandle: exStmtResp.OperationHandle, + } + resp, err := c.client.GetResultSetMetadata(ctx, &req) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error performing staging operation", err) + } + isStagingOperation = *resp.IsStagingOperation + } + + if !isStagingOperation { + return nil + } + + if len(driverctx.StagingPathsFromContext(ctx)) != 0 { + row, err = rows.NewRows(c.id, corrId, exStmtResp.OperationHandle, c.client, c.cfg, exStmtResp.DirectResults) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error reading row.", err) + } + + } else { + return dbsqlerrint.NewDriverError(ctx, "staging ctx must be provided.", nil) + } + + var sqlRow []driver.Value + colNames := row.Columns() + sqlRow = make([]driver.Value, len(colNames)) + err = row.Next(sqlRow) + if err != nil { + return dbsqlerrint.NewDriverError(ctx, "error fetching staging operation results", err) + } + var stringValues []string = make([]string, 4) + for i, val := range sqlRow { // this will either be 3 (remove op) or 4 (put/get) elements + if s, ok := val.(string); ok { + stringValues[i] = s + } else { + return dbsqlerrint.NewDriverError(ctx, "received unexpected response from the server.", nil) + } + } + operation := stringValues[0] + presignedUrl := stringValues[1] + headersByteArr := []byte(stringValues[2]) + var headers map[string]string + if err := json.Unmarshal(headersByteArr, &headers); err != nil { + return dbsqlerrint.NewDriverError(ctx, "error parsing server response.", nil) + } + localFile := stringValues[3] + stagingAllowedLocalPaths := driverctx.StagingPathsFromContext(ctx) + switch operation { + case "PUT": + if localPathIsAllowed(stagingAllowedLocalPaths, localFile) { + return c.handleStagingPut(ctx, 
presignedUrl, headers, localFile) + } else { + return dbsqlerrint.NewDriverError(ctx, "local file operations are restricted to paths within the configured stagingAllowedLocalPath", nil) + } + case "GET": + if localPathIsAllowed(stagingAllowedLocalPaths, localFile) { + return c.handleStagingGet(ctx, presignedUrl, headers, localFile) + } else { + return dbsqlerrint.NewDriverError(ctx, "local file operations are restricted to paths within the configured stagingAllowedLocalPath", nil) + } + case "REMOVE": + return c.handleStagingRemove(ctx, presignedUrl, headers) + default: + return dbsqlerrint.NewDriverError(ctx, fmt.Sprintf("operation %s is not supported. Supported operations are GET, PUT, and REMOVE", operation), nil) + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/connector.go b/vendor/github.com/databricks/databricks-sql-go/connector.go new file mode 100644 index 00000000..a5b80002 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/connector.go @@ -0,0 +1,285 @@ +package dbsql + +import ( + "context" + "crypto/tls" + "database/sql/driver" + "fmt" + "net/http" + "strings" + "time" + + "github.com/databricks/databricks-sql-go/auth" + "github.com/databricks/databricks-sql-go/auth/oauth/m2m" + "github.com/databricks/databricks-sql-go/auth/pat" + "github.com/databricks/databricks-sql-go/driverctx" + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/client" + "github.com/databricks/databricks-sql-go/internal/config" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + "github.com/databricks/databricks-sql-go/logger" +) + +type connector struct { + cfg *config.Config + client *http.Client +} + +// Connect returns a connection to the Databricks database from a connection pool. 
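+// It opens a new Thrift session, applying the configured initial catalog,
+// schema and session parameters before handing the connection back.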
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + var catalogName *cli_service.TIdentifier + var schemaName *cli_service.TIdentifier + if c.cfg.Catalog != "" { + catalogName = cli_service.TIdentifierPtr(cli_service.TIdentifier(c.cfg.Catalog)) + } + if c.cfg.Schema != "" { + schemaName = cli_service.TIdentifierPtr(cli_service.TIdentifier(c.cfg.Schema)) + } + + tclient, err := client.InitThriftClient(c.cfg, c.client) + if err != nil { + return nil, dbsqlerrint.NewDriverError(ctx, dbsqlerr.ErrThriftClient, err) + } + protocolVersion := int64(c.cfg.ThriftProtocolVersion) + session, err := tclient.OpenSession(ctx, &cli_service.TOpenSessionReq{ + ClientProtocolI64: &protocolVersion, + Configuration: make(map[string]string), + InitialNamespace: &cli_service.TNamespace{ + CatalogName: catalogName, + SchemaName: schemaName, + }, + CanUseMultipleCatalogs: &c.cfg.CanUseMultipleCatalogs, + }) + if err != nil { + return nil, dbsqlerrint.NewRequestError(ctx, fmt.Sprintf("error connecting: host=%s port=%d, httpPath=%s", c.cfg.Host, c.cfg.Port, c.cfg.HTTPPath), err) + } + + conn := &conn{ + id: client.SprintGuid(session.SessionHandle.GetSessionId().GUID), + cfg: c.cfg, + client: tclient, + session: session, + } + log := logger.WithContext(conn.id, driverctx.CorrelationIdFromContext(ctx), "") + + log.Info().Msgf("connect: host=%s port=%d httpPath=%s", c.cfg.Host, c.cfg.Port, c.cfg.HTTPPath) + + for k, v := range c.cfg.SessionParams { + setStmt := fmt.Sprintf("SET `%s` = `%s`;", k, v) + _, err := conn.ExecContext(ctx, setStmt, []driver.NamedValue{}) + if err != nil { + return nil, dbsqlerrint.NewExecutionError(ctx, fmt.Sprintf("error setting session param: %s", setStmt), err, nil) + } + log.Info().Msgf("set session parameter: param=%s value=%s", k, v) + } + return conn, nil +} + +// Driver returns underlying databricksDriver for compatibility with sql.DB Driver method +func (c *connector) Driver() driver.Driver { + return &databricksDriver{} +} + +var _ driver.Connector = (*connector)(nil) + +type ConnOption func(*config.Config) + +// NewConnector creates a connection that can be used with `sql.OpenDB()`. +// This is an easier way to set up the DB instead of having to construct a DSN string. +func NewConnector(options ...ConnOption) (driver.Connector, error) { + // config with default options + cfg := config.WithDefaults() + cfg.DriverVersion = DriverVersion + + for _, opt := range options { + opt(cfg) + } + + client := client.RetryableClient(cfg) + + return &connector{cfg: cfg, client: client}, nil +} + +func withUserConfig(ucfg config.UserConfig) ConnOption { + return func(c *config.Config) { + c.UserConfig = ucfg + } +} + +// WithServerHostname sets up the server hostname. Mandatory. +func WithServerHostname(host string) ConnOption { + return func(c *config.Config) { + protocol, hostname := parseHostName(host) + if protocol != "" { + c.Protocol = protocol + } + + c.Host = hostname + } +} + +func parseHostName(host string) (protocol, hostname string) { + hostname = host + if strings.HasPrefix(host, "https") { + hostname = strings.TrimPrefix(host, "https") + protocol = "https" + } else if strings.HasPrefix(host, "http") { + hostname = strings.TrimPrefix(host, "http") + protocol = "http" + } + + if protocol != "" { + hostname = strings.TrimPrefix(hostname, ":") + hostname = strings.TrimPrefix(hostname, "//") + } + + if hostname == "localhost" && protocol == "" { + protocol = "http" + } + + return +} + +// WithPort sets up the server port. Mandatory. 
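+// For Databricks SQL warehouses this is typically 443.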
+func WithPort(port int) ConnOption { + return func(c *config.Config) { + c.Port = port + } +} + +// WithRetries sets up retrying logic. Sane defaults are provided. Negative retryMax will disable retry behavior +// By default retryWaitMin = 1 * time.Second +// By default retryWaitMax = 30 * time.Second +// By default retryMax = 4 +func WithRetries(retryMax int, retryWaitMin time.Duration, retryWaitMax time.Duration) ConnOption { + return func(c *config.Config) { + c.RetryWaitMax = retryWaitMax + c.RetryWaitMin = retryWaitMin + c.RetryMax = retryMax + } +} + +// WithAccessToken sets up the Personal Access Token. Mandatory for now. +func WithAccessToken(token string) ConnOption { + return func(c *config.Config) { + if token != "" { + c.AccessToken = token + pat := &pat.PATAuth{ + AccessToken: token, + } + c.Authenticator = pat + } + } +} + +// WithHTTPPath sets up the endpoint to the warehouse. Mandatory. +func WithHTTPPath(path string) ConnOption { + return func(c *config.Config) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + c.HTTPPath = path + } +} + +// WithMaxRows sets up the max rows fetched per request. Default is 10000 +func WithMaxRows(n int) ConnOption { + return func(c *config.Config) { + if n != 0 { + c.MaxRows = n + } + } +} + +// WithTimeout adds timeout for the server query execution. Default is no timeout. +func WithTimeout(n time.Duration) ConnOption { + return func(c *config.Config) { + c.QueryTimeout = n + } +} + +// Sets the initial catalog name and schema name in the session. +// Use +func WithInitialNamespace(catalog, schema string) ConnOption { + return func(c *config.Config) { + c.Catalog = catalog + c.Schema = schema + } +} + +// Used to identify partners. Set as a string with format . +func WithUserAgentEntry(entry string) ConnOption { + return func(c *config.Config) { + c.UserAgentEntry = entry + } +} + +// Sessions params will be set upon opening the session by calling SET function. +// If using connection pool, session params can avoid successive calls of "SET ..." +func WithSessionParams(params map[string]string) ConnOption { + return func(c *config.Config) { + for k, v := range params { + if strings.ToLower(k) == "timezone" { + if loc, err := time.LoadLocation(v); err != nil { + logger.Error().Msgf("timezone %s is not valid", v) + } else { + c.Location = loc + } + } + } + c.SessionParams = params + } +} + +// WithSkipTLSHostVerify disables the verification of the hostname in the TLS certificate. +// WARNING: +// When this option is used, TLS is susceptible to machine-in-the-middle attacks. +// Please only use this option when the hostname is an internal private link hostname +func WithSkipTLSHostVerify() ConnOption { + return func(c *config.Config) { + if c.TLSConfig == nil { + c.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12, InsecureSkipVerify: true} // #nosec G402 + } else { + c.TLSConfig.InsecureSkipVerify = true // #nosec G402 + } + } +} + +// WithAuthenticator sets up the Authentication. Mandatory if access token is not provided. +func WithAuthenticator(authr auth.Authenticator) ConnOption { + return func(c *config.Config) { + c.Authenticator = authr + } +} + +// WithTransport sets up the transport configuration to be used by the httpclient. +func WithTransport(t http.RoundTripper) ConnOption { + return func(c *config.Config) { + c.Transport = t + } +} + +// WithCloudFetch sets up the use of cloud fetch for query execution. Default is false. 
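+// When enabled, large result sets are downloaded in parallel from cloud
+// storage instead of being streamed through the Thrift endpoint.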
+func WithCloudFetch(useCloudFetch bool) ConnOption { + return func(c *config.Config) { + c.UseCloudFetch = useCloudFetch + } +} + +// WithMaxDownloadThreads sets up maximum download threads for cloud fetch. Default is 10. +func WithMaxDownloadThreads(numThreads int) ConnOption { + return func(c *config.Config) { + c.MaxDownloadThreads = numThreads + } +} + +// Setup of Oauth M2m authentication +func WithClientCredentials(clientID, clientSecret string) ConnOption { + return func(c *config.Config) { + if clientID != "" && clientSecret != "" { + authr := m2m.NewAuthenticator(clientID, clientSecret, c.Host) + c.Authenticator = authr + } + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/doc.go b/vendor/github.com/databricks/databricks-sql-go/doc.go new file mode 100644 index 00000000..9463d771 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/doc.go @@ -0,0 +1,412 @@ +/* +Package dbsql implements the go driver to Databricks SQL + +# Usage + +Clients should use the database/sql package in conjunction with the driver: + + import ( + "database/sql" + + _ "github.com/databricks/databricks-sql-go" + ) + + func main() { + db, err := sql.Open("databricks", "token:@:/") + + if err != nil { + log.Fatal(err) + } + defer db.Close() + } + +# Connection via DSN (Data Source Name) + +Use sql.Open() to create a database handle via a data source name string: + + db, err := sql.Open("databricks", "") + +The DSN format is: + + token:[my_token]@[hostname]:[port]/[endpoint http path]?param=value + +Supported optional connection parameters can be specified in param=value and include: + + - catalog: Sets the initial catalog name in the session + - schema: Sets the initial schema name in the session + - maxRows: Sets up the max rows fetched per request. Default is 100000 + - timeout: Adds timeout (in seconds) for the server query execution. Default is no timeout + - userAgentEntry: Used to identify partners. Set as a string with format + - useCloudFetch: Used to enable cloud fetch for the query execution. Default is false + - maxDownloadThreads: Sets up the max number of concurrent workers for cloud fetch. Default is 10 + - authType: Specifies the desired authentication type. Valid values are: Pat, OauthM2M, OauthU2M + - accessToken: Personal access token. Required if authType set to Pat + - clientID: Specifies the client ID to use with OauthM2M + - clientSecret: Specifies the client secret to use with OauthM2M + +Supported optional session parameters can be specified in param=value and include: + + - ansi_mode: (Boolean string). Session statements will adhere to rules defined by ANSI SQL specification. + - timezone: (e.g. "America/Los_Angeles"). Sets the timezone of the session + +# Connection via new connector object + +Use sql.OpenDB() to create a database handle via a new connector object created with dbsql.NewConnector(): + + import ( + "database/sql" + dbsql "github.com/databricks/databricks-sql-go" + ) + + func main() { + connector, err := dbsql.NewConnector( + dbsql.WithServerHostname(), + dbsql.WithPort(), + dbsql.WithHTTPPath(), + dbsql.WithAccessToken() + ) + if err != nil { + log.Fatal(err) + } + + db := sql.OpenDB(connector) + defer db.Close() + ... + } + +Supported functional options include: + + - WithServerHostname( string): Sets up the server hostname. The hostname can be prefixed with "http:" or "https:" to specify a protocol to use. Mandatory + - WithPort( int): Sets up the server port. Mandatory + - WithAccessToken( string): Sets up the Personal Access Token. 
Mandatory + - WithHTTPPath( string): Sets up the endpoint to the warehouse. Mandatory + - WithInitialNamespace( string, string): Sets up the catalog and schema name in the session. Optional + - WithMaxRows( int): Sets up the max rows fetched per request. Default is 100000. Optional + - WithSessionParams( map[string]string): Sets up session parameters including "timezone" and "ansi_mode". Optional + - WithTimeout( Duration). Adds timeout (in time.Duration) for the server query execution. Default is no timeout. Optional + - WithUserAgentEntry( string). Used to identify partners. Optional + - WithCloudFetch (bool). Used to enable cloud fetch for the query execution. Default is false. Optional + - WithMaxDownloadThreads ( int). Sets up the max number of concurrent workers for cloud fetch. Default is 10. Optional + - WithAuthenticator ( auth.Authenticator). Sets up authentication. Required if neither access token or client credentials are provided. + - WithClientCredentials( string, string). Sets up Oauth M2M authentication. + +# Query cancellation and timeout + +Cancelling a query via context cancellation or timeout is supported. + + // Set up context timeout + ctx, cancel := context.WithTimeout(context.Background(), 30 * time.Second) + defer cancel() + + // Execute query. Query will be cancelled after 30 seconds if still running + res, err := db.ExecContext(ctx, "CREATE TABLE example(id int, message string)") + +# CorrelationId and ConnId + +Use the driverctx package under driverctx/ctx.go to add CorrelationId and ConnId to the context. +CorrelationId and ConnId makes it convenient to parse and create metrics in logging. + +**Connection Id** +Internal id to track what happens under a connection. Connections can be reused so this would track across queries. + +**Query Id** +Internal id to track what happens under a query. Useful because the same query can be used with multiple connections. + +**Correlation Id** +External id, such as request ID, to track what happens under a request. Useful to track multiple connections in the same request. + + ctx := dbsqlctx.NewContextWithCorrelationId(context.Background(), "workflow-example") + +# Logging + +Use the logger package under logger.go to set up logging (from zerolog). +By default, logging level is `warn`. If you want to disable logging, use `disabled`. +The user can also utilize Track() and Duration() to custom log the elapsed time of anything tracked. + + import ( + dbsqllog "github.com/databricks/databricks-sql-go/logger" + dbsqlctx "github.com/databricks/databricks-sql-go/driverctx" + ) + + func main() { + // Optional. Set the logging level with SetLogLevel() + if err := dbsqllog.SetLogLevel("debug"); err != nil { + log.Fatal(err) + } + + // Optional. Set logging output with SetLogOutput() + // Default is os.Stderr. If running in terminal, logger will use ConsoleWriter to prettify logs + dbsqllog.SetLogOutput(os.Stdout) + + // Optional. Set correlation id with NewContextWithCorrelationId + ctx := dbsqlctx.NewContextWithCorrelationId(context.Background(), "workflow-example") + + + // Optional. Track time spent and log elapsed time + msg, start := logger.Track("Run Main") + defer log.Duration(msg, start) + + db, err := sql.Open("databricks", "") + ... 
+ } + +The result log may look like this: + + {"level":"debug","connId":"01ed6545-5669-1ec7-8c7e-6d8a1ea0ab16","corrId":"workflow-example","queryId":"01ed6545-57cc-188a-bfc5-d9c0eaf8e189","time":1668558402,"message":"Run Main elapsed time: 1.298712292s"} + +# Programmatically Retrieving Connection and Query Id + +Use the driverctx package under driverctx/ctx.go to add callbacks to the query context to receive the connection id and query id. + + import ( + "github.com/databricks/databricks-sql-go/driverctx" + ) + + func main() { + + ... + + qidCallback := func(id string) { + fmt.Println("query id: " + id) + } + + connIdCallback := func(id string) { + fmt.Println("connection id: " + id) + } + + ctx := context.Background() + ctx = driverctx.NewContextWithQueryIdCallback(ctx, qidCallback) + ctx = driverctx.NewContextWithConnIdCallback(ctx, connIdCallback) + + rows, err1 := db.QueryContext(ctx, `select * from sometable`) + + ... + + } + +# Query parameters + +Passing parameters to a query is supported when run against servers with version DBR 14.1. + + p := dbsql.Parameter{Name: "p_bool", Value: true}, + rows, err1 := db.QueryContext(ctx, `select * from sometable where condition=:p_bool`,dbsql.Parameter{Name: "p_bool", Value: true}) + +For complex types, you can specify the SQL type using the dbsql.Parameter type field. If this field is set, the value field MUST be set to a string. + +# Staging Ingestion + +The Go driver now supports staging operations. In order to use a staging operation, you first must update the context with a list of folders that you are allowing the driver to access. + + ctx := driverctx.NewContextWithStagingInfo(context.Background(), []string{"staging/"}) + +After doing so, you can execute staging operations using this context using the exec context. + + _, err1 := db.ExecContext(ctx, `PUT 'staging/file.csv' INTO '/Volumes/main/staging_test/e2etests/file.csv' OVERWRITE`) + +# Errors + +There are three error types exposed via dbsql/errors + + DBDriverError - An error in the go driver. Example: unimplemented functionality, invalid driver state, errors processing a server response, etc. + + DBRequestError - An error that is caused by an invalid request. Example: permission denied, invalid http path or other connection parameter, resource not available, etc. + + DBExecutionError - Any error that occurs after the SQL statement has been accepted such as a SQL syntax error, missing table, etc. + +Each type has a corresponding sentinel value which can be used with errors.Is() to determine if one of the types is present in an error chain. + + DriverError + RequestError + ExecutionError + +Example usage: + + import ( + fmt + errors + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + ) + + func main() { + ... + _, err := db.ExecContext(ogCtx, `Select id from range(100)`) + if err != nil { + if errors.Is(err, dbsqlerr.ExecutionError) { + var execErr dbsqlerr.DBExecutionError + if ok := errors.As(err, &execError); ok { + fmt.Printf("%s, corrId: %s, connId: %s, queryId: %s, sqlState: %s, isRetryable: %t, retryAfter: %f seconds", + execErr.Error(), + execErr.CorrelationId(), + execErr.ConnectionId(), + execErr.QueryId(), + execErr.SqlState(), + execErr.IsRetryable(), + execErr.RetryAfter().Seconds(), + ) + } + } + ... + } + ... + } + +See the documentation for dbsql/errors for more information. + +# Retrieving Arrow Batches + +The driver supports the ability to retrieve Apache Arrow record batches. 
+To work with record batches it is necessary to use sql.Conn.Raw() to access the underlying driver connection and retrieve a driver.Rows instance.
+The driver exposes two public interfaces for working with record batches in the rows sub-package:
+
+	type Rows interface {
+		GetArrowBatches(context.Context) (ArrowBatchIterator, error)
+	}
+
+	type ArrowBatchIterator interface {
+		// Retrieve the next arrow.Record.
+		// Will return io.EOF if there are no more records.
+		Next() (arrow.Record, error)
+
+		// Return true if the iterator contains more batches, false otherwise.
+		HasNext() bool
+
+		// Release any resources in use by the iterator.
+		Close()
+	}
+
+The driver.Rows instance retrieved using Conn.Raw() can be converted to a Databricks Rows instance via a type assertion; GetArrowBatches() then returns a batch iterator.
+If the ArrowBatchIterator is not closed it will leak resources, such as the underlying connection.
+Calling code must call Release() on records returned by ArrowBatchIterator.Next().
+
+Example usage:
+
+	import (
+		...
+		dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+	)
+
+	func main() {
+		...
+		db := sql.OpenDB(connector)
+		defer db.Close()
+
+		ctx := context.Background()
+
+		conn, _ := db.Conn(ctx)
+		defer conn.Close()
+
+		query := `select * from main.default.taxi_trip_data`
+
+		var rows driver.Rows
+		var err error
+		err = conn.Raw(func(d interface{}) error {
+			rows, err = d.(driver.QueryerContext).QueryContext(ctx, query, nil)
+			return err
+		})
+
+		if err != nil {
+			log.Fatalf("unable to run the query. err: %v", err)
+		}
+		defer rows.Close()
+
+		batches, err := rows.(dbsqlrows.Rows).GetArrowBatches(ctx)
+		if err != nil {
+			log.Fatalf("unable to get arrow batches. err: %v", err)
+		}
+
+		var iBatch, nRows int
+		for batches.HasNext() {
+			b, err := batches.Next()
+			if err != nil {
+				log.Fatalf("failure retrieving batch. err: %v", err)
+			}
+
+			log.Printf("batch %v: nRecords=%v\n", iBatch, b.NumRows())
+			iBatch++
+			nRows += int(b.NumRows())
+			b.Release()
+		}
+		log.Printf("NRows: %v\n", nRows)
+	}
+
+# Supported Data Types
+
+	==================================
+	Databricks Type       --> Golang Type
+	==================================
+	BOOLEAN               --> bool
+	TINYINT               --> int8
+	SMALLINT              --> int16
+	INT                   --> int32
+	BIGINT                --> int64
+	FLOAT                 --> float32
+	DOUBLE                --> float64
+	VOID                  --> nil
+	STRING                --> string
+	DATE                  --> time.Time
+	TIMESTAMP             --> time.Time
+	DECIMAL(p,s)          --> sql.RawBytes
+	BINARY                --> sql.RawBytes
+	ARRAY                 --> sql.RawBytes
+	STRUCT                --> sql.RawBytes
+	MAP                   --> sql.RawBytes
+	INTERVAL (year-month) --> string
+	INTERVAL (day-time)   --> string
+
+For ARRAY, STRUCT, and MAP types, sql.Scan can cast sql.RawBytes to a JSON string, which can be unmarshalled into Golang
+arrays, maps, and structs.
For example:
+
+	type structVal struct {
+		StringField string `json:"string_field"`
+		ArrayField  []int  `json:"array_field"`
+	}
+	type row struct {
+		arrayVal  []int
+		mapVal    map[string]int
+		structVal structVal
+	}
+	res := []row{}
+
+	for rows.Next() {
+		r := row{}
+		tempArray := []byte{}
+		tempStruct := []byte{}
+		tempMap := []byte{}
+		if err := rows.Scan(&tempArray, &tempMap, &tempStruct); err != nil {
+			log.Fatal(err)
+		}
+		if err := json.Unmarshal(tempArray, &r.arrayVal); err != nil {
+			log.Fatal(err)
+		}
+		if err := json.Unmarshal(tempMap, &r.mapVal); err != nil {
+			log.Fatal(err)
+		}
+		if err := json.Unmarshal(tempStruct, &r.structVal); err != nil {
+			log.Fatal(err)
+		}
+		res = append(res, r)
+	}
+
+This may generate the following row:
+
+	{arrayVal:[1,2,3] mapVal:{"key1":1} structVal:{"string_field":"string_val","array_field":[4,5,6]}}
+*/
+package dbsql
diff --git a/vendor/github.com/databricks/databricks-sql-go/driver.go b/vendor/github.com/databricks/databricks-sql-go/driver.go
new file mode 100644
index 00000000..da59e2d2
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/driver.go
@@ -0,0 +1,41 @@
+package dbsql
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+
+	"github.com/databricks/databricks-sql-go/internal/config"
+	_ "github.com/databricks/databricks-sql-go/logger"
+)
+
+func init() {
+	sql.Register("databricks", &databricksDriver{})
+}
+
+var DriverVersion = "1.6.1" // update version before each release
+
+type databricksDriver struct{}
+
+// Open returns a new connection to the Databricks database for the given DSN string.
+// Use sql.Open("databricks", dsn) after importing this driver package.
+func (d *databricksDriver) Open(dsn string) (driver.Conn, error) {
+	cn, err := d.OpenConnector(dsn)
+	if err != nil {
+		return nil, err
+	}
+	return cn.Connect(context.Background())
+}
+
+// OpenConnector returns a new Connector.
+// It is used by sql.DB to obtain a Connector and invoke its Connect method to obtain each needed connection.
+func (d *databricksDriver) OpenConnector(dsn string) (driver.Connector, error) {
+	ucfg, err := config.ParseDSN(dsn)
+	if err != nil {
+		return nil, err
+	}
+	return NewConnector(withUserConfig(ucfg))
+}
+
+var _ driver.Driver = (*databricksDriver)(nil)
+var _ driver.DriverContext = (*databricksDriver)(nil)
diff --git a/vendor/github.com/databricks/databricks-sql-go/driverctx/ctx.go b/vendor/github.com/databricks/databricks-sql-go/driverctx/ctx.go
new file mode 100644
index 00000000..f8f4674d
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/driverctx/ctx.go
@@ -0,0 +1,122 @@
+package driverctx
+
+import (
+	"context"
+)
+
+// Key names used to look up values in a context.
+// A custom type is used to prevent key collisions.
+type contextKey int
+
+const (
+	CorrelationIdContextKey contextKey = iota
+	ConnIdContextKey
+	QueryIdContextKey
+	QueryIdCallbackKey
+	ConnIdCallbackKey
+	StagingAllowedLocalPathKey
+)
+
+type IdCallbackFunc func(string)
+
+// NewContextWithCorrelationId creates a new context with a correlationId value. Used by the logger to populate the corrId field.
+func NewContextWithCorrelationId(ctx context.Context, correlationId string) context.Context {
+	return context.WithValue(ctx, CorrelationIdContextKey, correlationId)
+}
+
+// CorrelationIdFromContext retrieves the correlationId stored in the context.
+func CorrelationIdFromContext(ctx context.Context) string {
+	if ctx == nil {
+		return ""
+	}
+
+	corrId, ok := ctx.Value(CorrelationIdContextKey).(string)
+	if !ok {
+		return ""
+	}
+	return corrId
+}
+
+// NewContextWithConnId creates a new context with a connectionId value.
+// The connection id will be displayed in log messages and other diagnostic information.
+func NewContextWithConnId(ctx context.Context, connId string) context.Context {
+	if callback, ok := ctx.Value(ConnIdCallbackKey).(IdCallbackFunc); ok {
+		callback(connId)
+	}
+	return context.WithValue(ctx, ConnIdContextKey, connId)
+}
+
+// ConnIdFromContext retrieves the connectionId stored in the context.
+func ConnIdFromContext(ctx context.Context) string {
+	if ctx == nil {
+		return ""
+	}
+
+	connId, ok := ctx.Value(ConnIdContextKey).(string)
+	if !ok {
+		return ""
+	}
+	return connId
+}
+
+// NewContextWithQueryId creates a new context with a queryId value.
+// The query id will be displayed in log messages and other diagnostic information.
+func NewContextWithQueryId(ctx context.Context, queryId string) context.Context {
+	if callback, ok := ctx.Value(QueryIdCallbackKey).(IdCallbackFunc); ok {
+		callback(queryId)
+	}
+
+	return context.WithValue(ctx, QueryIdContextKey, queryId)
+}
+
+// QueryIdFromContext retrieves the queryId stored in the context.
+func QueryIdFromContext(ctx context.Context) string {
+	if ctx == nil {
+		return ""
+	}
+
+	queryId, ok := ctx.Value(QueryIdContextKey).(string)
+	if !ok {
+		return ""
+	}
+	return queryId
+}
+
+// StagingPathsFromContext retrieves the staging paths stored in the context.
+func StagingPathsFromContext(ctx context.Context) []string {
+	if ctx == nil {
+		return []string{}
+	}
+
+	stagingAllowedLocalPath, ok := ctx.Value(StagingAllowedLocalPathKey).([]string)
+	if !ok {
+		return []string{}
+	}
+	return stagingAllowedLocalPath
+}
+
+// NewContextWithQueryIdCallback registers a callback that is invoked with the query id once it is known.
+func NewContextWithQueryIdCallback(ctx context.Context, callback IdCallbackFunc) context.Context {
+	return context.WithValue(ctx, QueryIdCallbackKey, callback)
+}
+
+// NewContextWithConnIdCallback registers a callback that is invoked with the connection id once it is known.
+func NewContextWithConnIdCallback(ctx context.Context, callback IdCallbackFunc) context.Context {
+	return context.WithValue(ctx, ConnIdCallbackKey, callback)
+}
+
+// NewContextWithStagingInfo creates a new context with the list of local paths the driver is allowed to access during staging operations.
+func NewContextWithStagingInfo(ctx context.Context, stagingAllowedLocalPath []string) context.Context {
+	return context.WithValue(ctx, StagingAllowedLocalPathKey, stagingAllowedLocalPath)
+}
+
+// NewContextFromBackground copies the driver-specific values from ctx into a new context derived from context.Background().
+func NewContextFromBackground(ctx context.Context) context.Context {
+	connId := ConnIdFromContext(ctx)
+	corrId := CorrelationIdFromContext(ctx)
+	queryId := QueryIdFromContext(ctx)
+	stagingPaths := StagingPathsFromContext(ctx)
+
+	newCtx := NewContextWithConnId(context.Background(), connId)
+	newCtx = NewContextWithCorrelationId(newCtx, corrId)
+	newCtx = NewContextWithQueryId(newCtx, queryId)
+	newCtx = NewContextWithStagingInfo(newCtx, stagingPaths)
+
+	return newCtx
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/errors/errors.go b/vendor/github.com/databricks/databricks-sql-go/errors/errors.go
new file mode 100644
index 00000000..6a71245d
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/errors/errors.go
@@ -0,0 +1,107 @@
+package errors
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// Error messages
+const (
+	// Driver errors
+	ErrNotImplemented           = "not implemented"
+	ErrTransactionsNotSupported = "transactions are not supported"
+	ErrReadQueryStatus          = "could not read query status"
+	ErrSentinelTimeout          = "sentinel timed out waiting for operation to complete"
+	ErrParametersNotSupported   = "query parameters are not supported by this server"
+
+	// Request error messages (connection, authentication, network error)
+	ErrCloseConnection = "failed to close connection"
+	ErrThriftClient    = "error initializing thrift client"
+	ErrInvalidURL      = "invalid URL"
+
+	ErrNoAuthenticationMethod = "no authentication method set"
+	ErrNoDefaultAuthenticator = "unable to create default authenticator"
+	ErrInvalidDSNFormat       = "invalid DSN: invalid format"
+	ErrInvalidDSNPort         = "invalid DSN: invalid DSN port"
+	ErrInvalidDSNPATIsEmpty   = "invalid DSN: empty token"
+	ErrBasicAuthNotSupported  = "invalid DSN: basic auth not enabled"
+	ErrInvalidDSNM2m          = "invalid DSN: clientId and clientSecret params required"
+
+	// Execution error messages (query failure)
+	ErrQueryExecution = "failed to execute query"
+	ErrLinkExpired    = "link expired"
+)
+
+func InvalidDSNFormat(param string, value string, expected string) string {
+	return fmt.Sprintf("invalid DSN: param %s with value %s is not of type %s", param, value, expected)
+}
+
+func ErrInvalidOperationState(state string) string {
+	return fmt.Sprintf("invalid operation state %s. This should not have happened", state)
+}
+
+func ErrUnexpectedOperationState(state string) string {
+	return fmt.Sprintf("unexpected operation state %s", state)
+}
+
+// Value to be used with errors.Is() to determine if an error chain contains a request error.
+var RequestError error = errors.New("Request Error")
+
+// Value to be used with errors.Is() to determine if an error chain contains a driver error.
+var DriverError error = errors.New("Driver Error")
+
+// Value to be used with errors.Is() to determine if an error chain contains an execution error.
+var ExecutionError error = errors.New("Execution Error")
+
+// Value to be used with errors.Is() to determine if an error chain contains any Databricks error.
+var DatabricksError error = errors.New("Databricks Error")
+
+// DBError is the base interface for driver errors.
+type DBError interface {
+	// Descriptive message describing the error.
+	Error() string
+
+	// User-specified id to track what happens under a request. Useful to track multiple connections in the same request.
+	// Appears in log messages as field corrId. See driverctx.NewContextWithCorrelationId()
+	CorrelationId() string
+
+	// Internal id to track what happens under a connection. Connections can be reused so this would track across queries.
+	// Appears in log messages as field connId.
+	ConnectionId() string
+
+	// Stack trace associated with the error. May be nil.
+	StackTrace() errors.StackTrace
+
+	// Underlying causative error. May be nil.
+	Cause() error
+
+	// IsRetryable reports whether the failed operation may be retried.
+	IsRetryable() bool
+
+	// RetryAfter returns how long to wait before retrying, if known.
+	RetryAfter() time.Duration
+}
+
+// An error that is caused by an invalid request.
+// Example: permission denied, or the user tries to access a warehouse that doesn't exist.
+type DBRequestError interface {
+	DBError
+}
+
+// A fault that is caused by Databricks services.
+type DBDriverError interface {
+	DBError
+}
+
+// Any error that occurs after the SQL statement has been accepted (e.g. SQL syntax error).
+type DBExecutionError interface {
+	DBError
+
+	// Internal id to track what happens under a query.
+	// Appears in log messages as field queryId.
+	QueryId() string
+
+	// Optional portable error identifier across SQL engines.
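+	// For example, a SQL syntax error is typically reported with SQLSTATE "42601" (illustrative; actual values depend on the server).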
+ // See https://github.com/apache/spark/tree/master/core/src/main/resources/error#ansiiso-standard + SqlState() string +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/GoUnusedProtection__.go b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/GoUnusedProtection__.go new file mode 100644 index 00000000..130d291d --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. + +package cli_service + +var GoUnusedProtection__ int; + diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service-consts.go b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service-consts.go new file mode 100644 index 00000000..1048c269 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service-consts.go @@ -0,0 +1,70 @@ +// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. + +package cli_service + +import ( + "bytes" + "context" + "errors" + "fmt" + "time" + thrift "github.com/apache/thrift/lib/go/thrift" + "strings" + "regexp" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = errors.New +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal +// (needed by validator.) +var _ = strings.Contains +var _ = regexp.MatchString + +var PRIMITIVE_TYPES []TTypeId +var COMPLEX_TYPES []TTypeId +var COLLECTION_TYPES []TTypeId +var TYPE_NAMES map[TTypeId]string +const CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength" +const PRECISION = "precision" +const SCALE = "scale" + +func init() { +PRIMITIVE_TYPES = []TTypeId{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20, 21, } + +COMPLEX_TYPES = []TTypeId{ + 10, 11, 12, 13, 14, } + +COLLECTION_TYPES = []TTypeId{ + 10, 11, } + +TYPE_NAMES = map[TTypeId]string{ + 10: "ARRAY", + 4: "BIGINT", + 9: "BINARY", + 0: "BOOLEAN", + 19: "CHAR", + 17: "DATE", + 15: "DECIMAL", + 6: "DOUBLE", + 5: "FLOAT", + 21: "INTERVAL_DAY_TIME", + 20: "INTERVAL_YEAR_MONTH", + 3: "INT", + 11: "MAP", + 16: "NULL", + 2: "SMALLINT", + 7: "STRING", + 12: "STRUCT", + 8: "TIMESTAMP", + 1: "TINYINT", + 13: "UNIONTYPE", + 18: "VARCHAR", +} + +} + diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service.go b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service.go new file mode 100644 index 00000000..b66e66b4 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/cli_service/cli_service.go @@ -0,0 +1,35826 @@ +// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. + +package cli_service + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + thrift "github.com/apache/thrift/lib/go/thrift" + "strings" + "regexp" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = errors.New +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal +// (needed by validator.) 
+var _ = strings.Contains +var _ = regexp.MatchString + +type TProtocolVersion int64 +const ( + TProtocolVersion___HIVE_JDBC_WORKAROUND TProtocolVersion = -7 + TProtocolVersion___TEST_PROTOCOL_VERSION TProtocolVersion = 65281 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V1 TProtocolVersion = 0 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V2 TProtocolVersion = 1 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V3 TProtocolVersion = 2 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V4 TProtocolVersion = 3 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V5 TProtocolVersion = 4 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V6 TProtocolVersion = 5 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V7 TProtocolVersion = 6 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V8 TProtocolVersion = 7 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V9 TProtocolVersion = 8 + TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V10 TProtocolVersion = 9 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V1 TProtocolVersion = 42241 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V2 TProtocolVersion = 42242 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V3 TProtocolVersion = 42243 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V4 TProtocolVersion = 42244 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V5 TProtocolVersion = 42245 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V6 TProtocolVersion = 42246 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V7 TProtocolVersion = 42247 + TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V8 TProtocolVersion = 42248 +) + +func (p TProtocolVersion) String() string { + switch p { + case TProtocolVersion___HIVE_JDBC_WORKAROUND: return "__HIVE_JDBC_WORKAROUND" + case TProtocolVersion___TEST_PROTOCOL_VERSION: return "__TEST_PROTOCOL_VERSION" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V1: return "HIVE_CLI_SERVICE_PROTOCOL_V1" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V2: return "HIVE_CLI_SERVICE_PROTOCOL_V2" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V3: return "HIVE_CLI_SERVICE_PROTOCOL_V3" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V4: return "HIVE_CLI_SERVICE_PROTOCOL_V4" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V5: return "HIVE_CLI_SERVICE_PROTOCOL_V5" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V6: return "HIVE_CLI_SERVICE_PROTOCOL_V6" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V7: return "HIVE_CLI_SERVICE_PROTOCOL_V7" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V8: return "HIVE_CLI_SERVICE_PROTOCOL_V8" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V9: return "HIVE_CLI_SERVICE_PROTOCOL_V9" + case TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V10: return "HIVE_CLI_SERVICE_PROTOCOL_V10" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V1: return "SPARK_CLI_SERVICE_PROTOCOL_V1" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V2: return "SPARK_CLI_SERVICE_PROTOCOL_V2" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V3: return "SPARK_CLI_SERVICE_PROTOCOL_V3" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V4: return "SPARK_CLI_SERVICE_PROTOCOL_V4" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V5: return "SPARK_CLI_SERVICE_PROTOCOL_V5" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V6: return "SPARK_CLI_SERVICE_PROTOCOL_V6" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V7: return "SPARK_CLI_SERVICE_PROTOCOL_V7" + case TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V8: return "SPARK_CLI_SERVICE_PROTOCOL_V8" + } + return "" +} + +func TProtocolVersionFromString(s string) (TProtocolVersion, error) { + switch s { + case "__HIVE_JDBC_WORKAROUND": return 
TProtocolVersion___HIVE_JDBC_WORKAROUND, nil + case "__TEST_PROTOCOL_VERSION": return TProtocolVersion___TEST_PROTOCOL_VERSION, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V1": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V1, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V2": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V2, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V3": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V3, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V4": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V4, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V5": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V5, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V6": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V6, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V7": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V7, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V8": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V8, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V9": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V9, nil + case "HIVE_CLI_SERVICE_PROTOCOL_V10": return TProtocolVersion_HIVE_CLI_SERVICE_PROTOCOL_V10, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V1": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V1, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V2": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V2, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V3": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V3, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V4": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V4, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V5": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V5, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V6": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V6, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V7": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V7, nil + case "SPARK_CLI_SERVICE_PROTOCOL_V8": return TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V8, nil + } + return TProtocolVersion(0), fmt.Errorf("not a valid TProtocolVersion string") +} + + +func TProtocolVersionPtr(v TProtocolVersion) *TProtocolVersion { return &v } + +func (p TProtocolVersion) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TProtocolVersion) UnmarshalText(text []byte) error { +q, err := TProtocolVersionFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TProtocolVersion) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TProtocolVersion(v) +return nil +} + +func (p * TProtocolVersion) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TTypeId int64 +const ( + TTypeId_BOOLEAN_TYPE TTypeId = 0 + TTypeId_TINYINT_TYPE TTypeId = 1 + TTypeId_SMALLINT_TYPE TTypeId = 2 + TTypeId_INT_TYPE TTypeId = 3 + TTypeId_BIGINT_TYPE TTypeId = 4 + TTypeId_FLOAT_TYPE TTypeId = 5 + TTypeId_DOUBLE_TYPE TTypeId = 6 + TTypeId_STRING_TYPE TTypeId = 7 + TTypeId_TIMESTAMP_TYPE TTypeId = 8 + TTypeId_BINARY_TYPE TTypeId = 9 + TTypeId_ARRAY_TYPE TTypeId = 10 + TTypeId_MAP_TYPE TTypeId = 11 + TTypeId_STRUCT_TYPE TTypeId = 12 + TTypeId_UNION_TYPE TTypeId = 13 + TTypeId_USER_DEFINED_TYPE TTypeId = 14 + TTypeId_DECIMAL_TYPE TTypeId = 15 + TTypeId_NULL_TYPE TTypeId = 16 + TTypeId_DATE_TYPE TTypeId = 17 + TTypeId_VARCHAR_TYPE TTypeId = 18 + TTypeId_CHAR_TYPE TTypeId = 19 + TTypeId_INTERVAL_YEAR_MONTH_TYPE TTypeId = 20 + TTypeId_INTERVAL_DAY_TIME_TYPE TTypeId = 21 +) + +func (p TTypeId) String() string { + switch p { + case TTypeId_BOOLEAN_TYPE: return 
"BOOLEAN_TYPE" + case TTypeId_TINYINT_TYPE: return "TINYINT_TYPE" + case TTypeId_SMALLINT_TYPE: return "SMALLINT_TYPE" + case TTypeId_INT_TYPE: return "INT_TYPE" + case TTypeId_BIGINT_TYPE: return "BIGINT_TYPE" + case TTypeId_FLOAT_TYPE: return "FLOAT_TYPE" + case TTypeId_DOUBLE_TYPE: return "DOUBLE_TYPE" + case TTypeId_STRING_TYPE: return "STRING_TYPE" + case TTypeId_TIMESTAMP_TYPE: return "TIMESTAMP_TYPE" + case TTypeId_BINARY_TYPE: return "BINARY_TYPE" + case TTypeId_ARRAY_TYPE: return "ARRAY_TYPE" + case TTypeId_MAP_TYPE: return "MAP_TYPE" + case TTypeId_STRUCT_TYPE: return "STRUCT_TYPE" + case TTypeId_UNION_TYPE: return "UNION_TYPE" + case TTypeId_USER_DEFINED_TYPE: return "USER_DEFINED_TYPE" + case TTypeId_DECIMAL_TYPE: return "DECIMAL_TYPE" + case TTypeId_NULL_TYPE: return "NULL_TYPE" + case TTypeId_DATE_TYPE: return "DATE_TYPE" + case TTypeId_VARCHAR_TYPE: return "VARCHAR_TYPE" + case TTypeId_CHAR_TYPE: return "CHAR_TYPE" + case TTypeId_INTERVAL_YEAR_MONTH_TYPE: return "INTERVAL_YEAR_MONTH_TYPE" + case TTypeId_INTERVAL_DAY_TIME_TYPE: return "INTERVAL_DAY_TIME_TYPE" + } + return "" +} + +func TTypeIdFromString(s string) (TTypeId, error) { + switch s { + case "BOOLEAN_TYPE": return TTypeId_BOOLEAN_TYPE, nil + case "TINYINT_TYPE": return TTypeId_TINYINT_TYPE, nil + case "SMALLINT_TYPE": return TTypeId_SMALLINT_TYPE, nil + case "INT_TYPE": return TTypeId_INT_TYPE, nil + case "BIGINT_TYPE": return TTypeId_BIGINT_TYPE, nil + case "FLOAT_TYPE": return TTypeId_FLOAT_TYPE, nil + case "DOUBLE_TYPE": return TTypeId_DOUBLE_TYPE, nil + case "STRING_TYPE": return TTypeId_STRING_TYPE, nil + case "TIMESTAMP_TYPE": return TTypeId_TIMESTAMP_TYPE, nil + case "BINARY_TYPE": return TTypeId_BINARY_TYPE, nil + case "ARRAY_TYPE": return TTypeId_ARRAY_TYPE, nil + case "MAP_TYPE": return TTypeId_MAP_TYPE, nil + case "STRUCT_TYPE": return TTypeId_STRUCT_TYPE, nil + case "UNION_TYPE": return TTypeId_UNION_TYPE, nil + case "USER_DEFINED_TYPE": return TTypeId_USER_DEFINED_TYPE, nil + case "DECIMAL_TYPE": return TTypeId_DECIMAL_TYPE, nil + case "NULL_TYPE": return TTypeId_NULL_TYPE, nil + case "DATE_TYPE": return TTypeId_DATE_TYPE, nil + case "VARCHAR_TYPE": return TTypeId_VARCHAR_TYPE, nil + case "CHAR_TYPE": return TTypeId_CHAR_TYPE, nil + case "INTERVAL_YEAR_MONTH_TYPE": return TTypeId_INTERVAL_YEAR_MONTH_TYPE, nil + case "INTERVAL_DAY_TIME_TYPE": return TTypeId_INTERVAL_DAY_TIME_TYPE, nil + } + return TTypeId(0), fmt.Errorf("not a valid TTypeId string") +} + + +func TTypeIdPtr(v TTypeId) *TTypeId { return &v } + +func (p TTypeId) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TTypeId) UnmarshalText(text []byte) error { +q, err := TTypeIdFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TTypeId) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TTypeId(v) +return nil +} + +func (p * TTypeId) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TSparkRowSetType int64 +const ( + TSparkRowSetType_ARROW_BASED_SET TSparkRowSetType = 0 + TSparkRowSetType_COLUMN_BASED_SET TSparkRowSetType = 1 + TSparkRowSetType_ROW_BASED_SET TSparkRowSetType = 2 + TSparkRowSetType_URL_BASED_SET TSparkRowSetType = 3 +) + +func (p TSparkRowSetType) String() string { + switch p { + case TSparkRowSetType_ARROW_BASED_SET: return "ARROW_BASED_SET" + case TSparkRowSetType_COLUMN_BASED_SET: return "COLUMN_BASED_SET" + case 
TSparkRowSetType_ROW_BASED_SET: return "ROW_BASED_SET" + case TSparkRowSetType_URL_BASED_SET: return "URL_BASED_SET" + } + return "" +} + +func TSparkRowSetTypeFromString(s string) (TSparkRowSetType, error) { + switch s { + case "ARROW_BASED_SET": return TSparkRowSetType_ARROW_BASED_SET, nil + case "COLUMN_BASED_SET": return TSparkRowSetType_COLUMN_BASED_SET, nil + case "ROW_BASED_SET": return TSparkRowSetType_ROW_BASED_SET, nil + case "URL_BASED_SET": return TSparkRowSetType_URL_BASED_SET, nil + } + return TSparkRowSetType(0), fmt.Errorf("not a valid TSparkRowSetType string") +} + + +func TSparkRowSetTypePtr(v TSparkRowSetType) *TSparkRowSetType { return &v } + +func (p TSparkRowSetType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TSparkRowSetType) UnmarshalText(text []byte) error { +q, err := TSparkRowSetTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TSparkRowSetType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TSparkRowSetType(v) +return nil +} + +func (p * TSparkRowSetType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TDBSqlCompressionCodec int64 +const ( + TDBSqlCompressionCodec_NONE TDBSqlCompressionCodec = 0 + TDBSqlCompressionCodec_LZ4_FRAME TDBSqlCompressionCodec = 1 + TDBSqlCompressionCodec_LZ4_BLOCK TDBSqlCompressionCodec = 2 +) + +func (p TDBSqlCompressionCodec) String() string { + switch p { + case TDBSqlCompressionCodec_NONE: return "NONE" + case TDBSqlCompressionCodec_LZ4_FRAME: return "LZ4_FRAME" + case TDBSqlCompressionCodec_LZ4_BLOCK: return "LZ4_BLOCK" + } + return "" +} + +func TDBSqlCompressionCodecFromString(s string) (TDBSqlCompressionCodec, error) { + switch s { + case "NONE": return TDBSqlCompressionCodec_NONE, nil + case "LZ4_FRAME": return TDBSqlCompressionCodec_LZ4_FRAME, nil + case "LZ4_BLOCK": return TDBSqlCompressionCodec_LZ4_BLOCK, nil + } + return TDBSqlCompressionCodec(0), fmt.Errorf("not a valid TDBSqlCompressionCodec string") +} + + +func TDBSqlCompressionCodecPtr(v TDBSqlCompressionCodec) *TDBSqlCompressionCodec { return &v } + +func (p TDBSqlCompressionCodec) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TDBSqlCompressionCodec) UnmarshalText(text []byte) error { +q, err := TDBSqlCompressionCodecFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TDBSqlCompressionCodec) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TDBSqlCompressionCodec(v) +return nil +} + +func (p * TDBSqlCompressionCodec) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TDBSqlArrowLayout int64 +const ( + TDBSqlArrowLayout_ARROW_BATCH TDBSqlArrowLayout = 0 + TDBSqlArrowLayout_ARROW_STREAMING TDBSqlArrowLayout = 1 +) + +func (p TDBSqlArrowLayout) String() string { + switch p { + case TDBSqlArrowLayout_ARROW_BATCH: return "ARROW_BATCH" + case TDBSqlArrowLayout_ARROW_STREAMING: return "ARROW_STREAMING" + } + return "" +} + +func TDBSqlArrowLayoutFromString(s string) (TDBSqlArrowLayout, error) { + switch s { + case "ARROW_BATCH": return TDBSqlArrowLayout_ARROW_BATCH, nil + case "ARROW_STREAMING": return TDBSqlArrowLayout_ARROW_STREAMING, nil + } + return TDBSqlArrowLayout(0), fmt.Errorf("not a valid TDBSqlArrowLayout string") +} + + +func TDBSqlArrowLayoutPtr(v 
TDBSqlArrowLayout) *TDBSqlArrowLayout { return &v } + +func (p TDBSqlArrowLayout) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TDBSqlArrowLayout) UnmarshalText(text []byte) error { +q, err := TDBSqlArrowLayoutFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TDBSqlArrowLayout) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TDBSqlArrowLayout(v) +return nil +} + +func (p * TDBSqlArrowLayout) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TOperationIdempotencyType int64 +const ( + TOperationIdempotencyType_UNKNOWN TOperationIdempotencyType = 0 + TOperationIdempotencyType_NON_IDEMPOTENT TOperationIdempotencyType = 1 + TOperationIdempotencyType_IDEMPOTENT TOperationIdempotencyType = 2 +) + +func (p TOperationIdempotencyType) String() string { + switch p { + case TOperationIdempotencyType_UNKNOWN: return "UNKNOWN" + case TOperationIdempotencyType_NON_IDEMPOTENT: return "NON_IDEMPOTENT" + case TOperationIdempotencyType_IDEMPOTENT: return "IDEMPOTENT" + } + return "" +} + +func TOperationIdempotencyTypeFromString(s string) (TOperationIdempotencyType, error) { + switch s { + case "UNKNOWN": return TOperationIdempotencyType_UNKNOWN, nil + case "NON_IDEMPOTENT": return TOperationIdempotencyType_NON_IDEMPOTENT, nil + case "IDEMPOTENT": return TOperationIdempotencyType_IDEMPOTENT, nil + } + return TOperationIdempotencyType(0), fmt.Errorf("not a valid TOperationIdempotencyType string") +} + + +func TOperationIdempotencyTypePtr(v TOperationIdempotencyType) *TOperationIdempotencyType { return &v } + +func (p TOperationIdempotencyType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TOperationIdempotencyType) UnmarshalText(text []byte) error { +q, err := TOperationIdempotencyTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TOperationIdempotencyType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TOperationIdempotencyType(v) +return nil +} + +func (p * TOperationIdempotencyType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TOperationTimeoutLevel int64 +const ( + TOperationTimeoutLevel_CLUSTER TOperationTimeoutLevel = 0 + TOperationTimeoutLevel_SESSION TOperationTimeoutLevel = 1 +) + +func (p TOperationTimeoutLevel) String() string { + switch p { + case TOperationTimeoutLevel_CLUSTER: return "CLUSTER" + case TOperationTimeoutLevel_SESSION: return "SESSION" + } + return "" +} + +func TOperationTimeoutLevelFromString(s string) (TOperationTimeoutLevel, error) { + switch s { + case "CLUSTER": return TOperationTimeoutLevel_CLUSTER, nil + case "SESSION": return TOperationTimeoutLevel_SESSION, nil + } + return TOperationTimeoutLevel(0), fmt.Errorf("not a valid TOperationTimeoutLevel string") +} + + +func TOperationTimeoutLevelPtr(v TOperationTimeoutLevel) *TOperationTimeoutLevel { return &v } + +func (p TOperationTimeoutLevel) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TOperationTimeoutLevel) UnmarshalText(text []byte) error { +q, err := TOperationTimeoutLevelFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TOperationTimeoutLevel) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value 
is not int64") +} +*p = TOperationTimeoutLevel(v) +return nil +} + +func (p * TOperationTimeoutLevel) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TStatusCode int64 +const ( + TStatusCode_SUCCESS_STATUS TStatusCode = 0 + TStatusCode_SUCCESS_WITH_INFO_STATUS TStatusCode = 1 + TStatusCode_STILL_EXECUTING_STATUS TStatusCode = 2 + TStatusCode_ERROR_STATUS TStatusCode = 3 + TStatusCode_INVALID_HANDLE_STATUS TStatusCode = 4 +) + +func (p TStatusCode) String() string { + switch p { + case TStatusCode_SUCCESS_STATUS: return "SUCCESS_STATUS" + case TStatusCode_SUCCESS_WITH_INFO_STATUS: return "SUCCESS_WITH_INFO_STATUS" + case TStatusCode_STILL_EXECUTING_STATUS: return "STILL_EXECUTING_STATUS" + case TStatusCode_ERROR_STATUS: return "ERROR_STATUS" + case TStatusCode_INVALID_HANDLE_STATUS: return "INVALID_HANDLE_STATUS" + } + return "" +} + +func TStatusCodeFromString(s string) (TStatusCode, error) { + switch s { + case "SUCCESS_STATUS": return TStatusCode_SUCCESS_STATUS, nil + case "SUCCESS_WITH_INFO_STATUS": return TStatusCode_SUCCESS_WITH_INFO_STATUS, nil + case "STILL_EXECUTING_STATUS": return TStatusCode_STILL_EXECUTING_STATUS, nil + case "ERROR_STATUS": return TStatusCode_ERROR_STATUS, nil + case "INVALID_HANDLE_STATUS": return TStatusCode_INVALID_HANDLE_STATUS, nil + } + return TStatusCode(0), fmt.Errorf("not a valid TStatusCode string") +} + + +func TStatusCodePtr(v TStatusCode) *TStatusCode { return &v } + +func (p TStatusCode) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TStatusCode) UnmarshalText(text []byte) error { +q, err := TStatusCodeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TStatusCode) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TStatusCode(v) +return nil +} + +func (p * TStatusCode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TOperationState int64 +const ( + TOperationState_INITIALIZED_STATE TOperationState = 0 + TOperationState_RUNNING_STATE TOperationState = 1 + TOperationState_FINISHED_STATE TOperationState = 2 + TOperationState_CANCELED_STATE TOperationState = 3 + TOperationState_CLOSED_STATE TOperationState = 4 + TOperationState_ERROR_STATE TOperationState = 5 + TOperationState_UKNOWN_STATE TOperationState = 6 + TOperationState_PENDING_STATE TOperationState = 7 + TOperationState_TIMEDOUT_STATE TOperationState = 8 +) + +func (p TOperationState) String() string { + switch p { + case TOperationState_INITIALIZED_STATE: return "INITIALIZED_STATE" + case TOperationState_RUNNING_STATE: return "RUNNING_STATE" + case TOperationState_FINISHED_STATE: return "FINISHED_STATE" + case TOperationState_CANCELED_STATE: return "CANCELED_STATE" + case TOperationState_CLOSED_STATE: return "CLOSED_STATE" + case TOperationState_ERROR_STATE: return "ERROR_STATE" + case TOperationState_UKNOWN_STATE: return "UKNOWN_STATE" + case TOperationState_PENDING_STATE: return "PENDING_STATE" + case TOperationState_TIMEDOUT_STATE: return "TIMEDOUT_STATE" + } + return "" +} + +func TOperationStateFromString(s string) (TOperationState, error) { + switch s { + case "INITIALIZED_STATE": return TOperationState_INITIALIZED_STATE, nil + case "RUNNING_STATE": return TOperationState_RUNNING_STATE, nil + case "FINISHED_STATE": return TOperationState_FINISHED_STATE, nil + case "CANCELED_STATE": return TOperationState_CANCELED_STATE, nil + case 
"CLOSED_STATE": return TOperationState_CLOSED_STATE, nil + case "ERROR_STATE": return TOperationState_ERROR_STATE, nil + case "UKNOWN_STATE": return TOperationState_UKNOWN_STATE, nil + case "PENDING_STATE": return TOperationState_PENDING_STATE, nil + case "TIMEDOUT_STATE": return TOperationState_TIMEDOUT_STATE, nil + } + return TOperationState(0), fmt.Errorf("not a valid TOperationState string") +} + + +func TOperationStatePtr(v TOperationState) *TOperationState { return &v } + +func (p TOperationState) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TOperationState) UnmarshalText(text []byte) error { +q, err := TOperationStateFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TOperationState) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TOperationState(v) +return nil +} + +func (p * TOperationState) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TOperationType int64 +const ( + TOperationType_EXECUTE_STATEMENT TOperationType = 0 + TOperationType_GET_TYPE_INFO TOperationType = 1 + TOperationType_GET_CATALOGS TOperationType = 2 + TOperationType_GET_SCHEMAS TOperationType = 3 + TOperationType_GET_TABLES TOperationType = 4 + TOperationType_GET_TABLE_TYPES TOperationType = 5 + TOperationType_GET_COLUMNS TOperationType = 6 + TOperationType_GET_FUNCTIONS TOperationType = 7 + TOperationType_UNKNOWN TOperationType = 8 +) + +func (p TOperationType) String() string { + switch p { + case TOperationType_EXECUTE_STATEMENT: return "EXECUTE_STATEMENT" + case TOperationType_GET_TYPE_INFO: return "GET_TYPE_INFO" + case TOperationType_GET_CATALOGS: return "GET_CATALOGS" + case TOperationType_GET_SCHEMAS: return "GET_SCHEMAS" + case TOperationType_GET_TABLES: return "GET_TABLES" + case TOperationType_GET_TABLE_TYPES: return "GET_TABLE_TYPES" + case TOperationType_GET_COLUMNS: return "GET_COLUMNS" + case TOperationType_GET_FUNCTIONS: return "GET_FUNCTIONS" + case TOperationType_UNKNOWN: return "UNKNOWN" + } + return "" +} + +func TOperationTypeFromString(s string) (TOperationType, error) { + switch s { + case "EXECUTE_STATEMENT": return TOperationType_EXECUTE_STATEMENT, nil + case "GET_TYPE_INFO": return TOperationType_GET_TYPE_INFO, nil + case "GET_CATALOGS": return TOperationType_GET_CATALOGS, nil + case "GET_SCHEMAS": return TOperationType_GET_SCHEMAS, nil + case "GET_TABLES": return TOperationType_GET_TABLES, nil + case "GET_TABLE_TYPES": return TOperationType_GET_TABLE_TYPES, nil + case "GET_COLUMNS": return TOperationType_GET_COLUMNS, nil + case "GET_FUNCTIONS": return TOperationType_GET_FUNCTIONS, nil + case "UNKNOWN": return TOperationType_UNKNOWN, nil + } + return TOperationType(0), fmt.Errorf("not a valid TOperationType string") +} + + +func TOperationTypePtr(v TOperationType) *TOperationType { return &v } + +func (p TOperationType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TOperationType) UnmarshalText(text []byte) error { +q, err := TOperationTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TOperationType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TOperationType(v) +return nil +} + +func (p * TOperationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TGetInfoType int64 +const ( 
+ TGetInfoType_CLI_MAX_DRIVER_CONNECTIONS TGetInfoType = 0 + TGetInfoType_CLI_MAX_CONCURRENT_ACTIVITIES TGetInfoType = 1 + TGetInfoType_CLI_DATA_SOURCE_NAME TGetInfoType = 2 + TGetInfoType_CLI_FETCH_DIRECTION TGetInfoType = 8 + TGetInfoType_CLI_SERVER_NAME TGetInfoType = 13 + TGetInfoType_CLI_SEARCH_PATTERN_ESCAPE TGetInfoType = 14 + TGetInfoType_CLI_DBMS_NAME TGetInfoType = 17 + TGetInfoType_CLI_DBMS_VER TGetInfoType = 18 + TGetInfoType_CLI_ACCESSIBLE_TABLES TGetInfoType = 19 + TGetInfoType_CLI_ACCESSIBLE_PROCEDURES TGetInfoType = 20 + TGetInfoType_CLI_CURSOR_COMMIT_BEHAVIOR TGetInfoType = 23 + TGetInfoType_CLI_DATA_SOURCE_READ_ONLY TGetInfoType = 25 + TGetInfoType_CLI_DEFAULT_TXN_ISOLATION TGetInfoType = 26 + TGetInfoType_CLI_IDENTIFIER_CASE TGetInfoType = 28 + TGetInfoType_CLI_IDENTIFIER_QUOTE_CHAR TGetInfoType = 29 + TGetInfoType_CLI_MAX_COLUMN_NAME_LEN TGetInfoType = 30 + TGetInfoType_CLI_MAX_CURSOR_NAME_LEN TGetInfoType = 31 + TGetInfoType_CLI_MAX_SCHEMA_NAME_LEN TGetInfoType = 32 + TGetInfoType_CLI_MAX_CATALOG_NAME_LEN TGetInfoType = 34 + TGetInfoType_CLI_MAX_TABLE_NAME_LEN TGetInfoType = 35 + TGetInfoType_CLI_SCROLL_CONCURRENCY TGetInfoType = 43 + TGetInfoType_CLI_TXN_CAPABLE TGetInfoType = 46 + TGetInfoType_CLI_USER_NAME TGetInfoType = 47 + TGetInfoType_CLI_TXN_ISOLATION_OPTION TGetInfoType = 72 + TGetInfoType_CLI_INTEGRITY TGetInfoType = 73 + TGetInfoType_CLI_GETDATA_EXTENSIONS TGetInfoType = 81 + TGetInfoType_CLI_NULL_COLLATION TGetInfoType = 85 + TGetInfoType_CLI_ALTER_TABLE TGetInfoType = 86 + TGetInfoType_CLI_ORDER_BY_COLUMNS_IN_SELECT TGetInfoType = 90 + TGetInfoType_CLI_SPECIAL_CHARACTERS TGetInfoType = 94 + TGetInfoType_CLI_MAX_COLUMNS_IN_GROUP_BY TGetInfoType = 97 + TGetInfoType_CLI_MAX_COLUMNS_IN_INDEX TGetInfoType = 98 + TGetInfoType_CLI_MAX_COLUMNS_IN_ORDER_BY TGetInfoType = 99 + TGetInfoType_CLI_MAX_COLUMNS_IN_SELECT TGetInfoType = 100 + TGetInfoType_CLI_MAX_COLUMNS_IN_TABLE TGetInfoType = 101 + TGetInfoType_CLI_MAX_INDEX_SIZE TGetInfoType = 102 + TGetInfoType_CLI_MAX_ROW_SIZE TGetInfoType = 104 + TGetInfoType_CLI_MAX_STATEMENT_LEN TGetInfoType = 105 + TGetInfoType_CLI_MAX_TABLES_IN_SELECT TGetInfoType = 106 + TGetInfoType_CLI_MAX_USER_NAME_LEN TGetInfoType = 107 + TGetInfoType_CLI_OJ_CAPABILITIES TGetInfoType = 115 + TGetInfoType_CLI_XOPEN_CLI_YEAR TGetInfoType = 10000 + TGetInfoType_CLI_CURSOR_SENSITIVITY TGetInfoType = 10001 + TGetInfoType_CLI_DESCRIBE_PARAMETER TGetInfoType = 10002 + TGetInfoType_CLI_CATALOG_NAME TGetInfoType = 10003 + TGetInfoType_CLI_COLLATION_SEQ TGetInfoType = 10004 + TGetInfoType_CLI_MAX_IDENTIFIER_LEN TGetInfoType = 10005 +) + +func (p TGetInfoType) String() string { + switch p { + case TGetInfoType_CLI_MAX_DRIVER_CONNECTIONS: return "CLI_MAX_DRIVER_CONNECTIONS" + case TGetInfoType_CLI_MAX_CONCURRENT_ACTIVITIES: return "CLI_MAX_CONCURRENT_ACTIVITIES" + case TGetInfoType_CLI_DATA_SOURCE_NAME: return "CLI_DATA_SOURCE_NAME" + case TGetInfoType_CLI_FETCH_DIRECTION: return "CLI_FETCH_DIRECTION" + case TGetInfoType_CLI_SERVER_NAME: return "CLI_SERVER_NAME" + case TGetInfoType_CLI_SEARCH_PATTERN_ESCAPE: return "CLI_SEARCH_PATTERN_ESCAPE" + case TGetInfoType_CLI_DBMS_NAME: return "CLI_DBMS_NAME" + case TGetInfoType_CLI_DBMS_VER: return "CLI_DBMS_VER" + case TGetInfoType_CLI_ACCESSIBLE_TABLES: return "CLI_ACCESSIBLE_TABLES" + case TGetInfoType_CLI_ACCESSIBLE_PROCEDURES: return "CLI_ACCESSIBLE_PROCEDURES" + case TGetInfoType_CLI_CURSOR_COMMIT_BEHAVIOR: return "CLI_CURSOR_COMMIT_BEHAVIOR" + case TGetInfoType_CLI_DATA_SOURCE_READ_ONLY: return 
"CLI_DATA_SOURCE_READ_ONLY" + case TGetInfoType_CLI_DEFAULT_TXN_ISOLATION: return "CLI_DEFAULT_TXN_ISOLATION" + case TGetInfoType_CLI_IDENTIFIER_CASE: return "CLI_IDENTIFIER_CASE" + case TGetInfoType_CLI_IDENTIFIER_QUOTE_CHAR: return "CLI_IDENTIFIER_QUOTE_CHAR" + case TGetInfoType_CLI_MAX_COLUMN_NAME_LEN: return "CLI_MAX_COLUMN_NAME_LEN" + case TGetInfoType_CLI_MAX_CURSOR_NAME_LEN: return "CLI_MAX_CURSOR_NAME_LEN" + case TGetInfoType_CLI_MAX_SCHEMA_NAME_LEN: return "CLI_MAX_SCHEMA_NAME_LEN" + case TGetInfoType_CLI_MAX_CATALOG_NAME_LEN: return "CLI_MAX_CATALOG_NAME_LEN" + case TGetInfoType_CLI_MAX_TABLE_NAME_LEN: return "CLI_MAX_TABLE_NAME_LEN" + case TGetInfoType_CLI_SCROLL_CONCURRENCY: return "CLI_SCROLL_CONCURRENCY" + case TGetInfoType_CLI_TXN_CAPABLE: return "CLI_TXN_CAPABLE" + case TGetInfoType_CLI_USER_NAME: return "CLI_USER_NAME" + case TGetInfoType_CLI_TXN_ISOLATION_OPTION: return "CLI_TXN_ISOLATION_OPTION" + case TGetInfoType_CLI_INTEGRITY: return "CLI_INTEGRITY" + case TGetInfoType_CLI_GETDATA_EXTENSIONS: return "CLI_GETDATA_EXTENSIONS" + case TGetInfoType_CLI_NULL_COLLATION: return "CLI_NULL_COLLATION" + case TGetInfoType_CLI_ALTER_TABLE: return "CLI_ALTER_TABLE" + case TGetInfoType_CLI_ORDER_BY_COLUMNS_IN_SELECT: return "CLI_ORDER_BY_COLUMNS_IN_SELECT" + case TGetInfoType_CLI_SPECIAL_CHARACTERS: return "CLI_SPECIAL_CHARACTERS" + case TGetInfoType_CLI_MAX_COLUMNS_IN_GROUP_BY: return "CLI_MAX_COLUMNS_IN_GROUP_BY" + case TGetInfoType_CLI_MAX_COLUMNS_IN_INDEX: return "CLI_MAX_COLUMNS_IN_INDEX" + case TGetInfoType_CLI_MAX_COLUMNS_IN_ORDER_BY: return "CLI_MAX_COLUMNS_IN_ORDER_BY" + case TGetInfoType_CLI_MAX_COLUMNS_IN_SELECT: return "CLI_MAX_COLUMNS_IN_SELECT" + case TGetInfoType_CLI_MAX_COLUMNS_IN_TABLE: return "CLI_MAX_COLUMNS_IN_TABLE" + case TGetInfoType_CLI_MAX_INDEX_SIZE: return "CLI_MAX_INDEX_SIZE" + case TGetInfoType_CLI_MAX_ROW_SIZE: return "CLI_MAX_ROW_SIZE" + case TGetInfoType_CLI_MAX_STATEMENT_LEN: return "CLI_MAX_STATEMENT_LEN" + case TGetInfoType_CLI_MAX_TABLES_IN_SELECT: return "CLI_MAX_TABLES_IN_SELECT" + case TGetInfoType_CLI_MAX_USER_NAME_LEN: return "CLI_MAX_USER_NAME_LEN" + case TGetInfoType_CLI_OJ_CAPABILITIES: return "CLI_OJ_CAPABILITIES" + case TGetInfoType_CLI_XOPEN_CLI_YEAR: return "CLI_XOPEN_CLI_YEAR" + case TGetInfoType_CLI_CURSOR_SENSITIVITY: return "CLI_CURSOR_SENSITIVITY" + case TGetInfoType_CLI_DESCRIBE_PARAMETER: return "CLI_DESCRIBE_PARAMETER" + case TGetInfoType_CLI_CATALOG_NAME: return "CLI_CATALOG_NAME" + case TGetInfoType_CLI_COLLATION_SEQ: return "CLI_COLLATION_SEQ" + case TGetInfoType_CLI_MAX_IDENTIFIER_LEN: return "CLI_MAX_IDENTIFIER_LEN" + } + return "" +} + +func TGetInfoTypeFromString(s string) (TGetInfoType, error) { + switch s { + case "CLI_MAX_DRIVER_CONNECTIONS": return TGetInfoType_CLI_MAX_DRIVER_CONNECTIONS, nil + case "CLI_MAX_CONCURRENT_ACTIVITIES": return TGetInfoType_CLI_MAX_CONCURRENT_ACTIVITIES, nil + case "CLI_DATA_SOURCE_NAME": return TGetInfoType_CLI_DATA_SOURCE_NAME, nil + case "CLI_FETCH_DIRECTION": return TGetInfoType_CLI_FETCH_DIRECTION, nil + case "CLI_SERVER_NAME": return TGetInfoType_CLI_SERVER_NAME, nil + case "CLI_SEARCH_PATTERN_ESCAPE": return TGetInfoType_CLI_SEARCH_PATTERN_ESCAPE, nil + case "CLI_DBMS_NAME": return TGetInfoType_CLI_DBMS_NAME, nil + case "CLI_DBMS_VER": return TGetInfoType_CLI_DBMS_VER, nil + case "CLI_ACCESSIBLE_TABLES": return TGetInfoType_CLI_ACCESSIBLE_TABLES, nil + case "CLI_ACCESSIBLE_PROCEDURES": return TGetInfoType_CLI_ACCESSIBLE_PROCEDURES, nil + case "CLI_CURSOR_COMMIT_BEHAVIOR": return 
TGetInfoType_CLI_CURSOR_COMMIT_BEHAVIOR, nil + case "CLI_DATA_SOURCE_READ_ONLY": return TGetInfoType_CLI_DATA_SOURCE_READ_ONLY, nil + case "CLI_DEFAULT_TXN_ISOLATION": return TGetInfoType_CLI_DEFAULT_TXN_ISOLATION, nil + case "CLI_IDENTIFIER_CASE": return TGetInfoType_CLI_IDENTIFIER_CASE, nil + case "CLI_IDENTIFIER_QUOTE_CHAR": return TGetInfoType_CLI_IDENTIFIER_QUOTE_CHAR, nil + case "CLI_MAX_COLUMN_NAME_LEN": return TGetInfoType_CLI_MAX_COLUMN_NAME_LEN, nil + case "CLI_MAX_CURSOR_NAME_LEN": return TGetInfoType_CLI_MAX_CURSOR_NAME_LEN, nil + case "CLI_MAX_SCHEMA_NAME_LEN": return TGetInfoType_CLI_MAX_SCHEMA_NAME_LEN, nil + case "CLI_MAX_CATALOG_NAME_LEN": return TGetInfoType_CLI_MAX_CATALOG_NAME_LEN, nil + case "CLI_MAX_TABLE_NAME_LEN": return TGetInfoType_CLI_MAX_TABLE_NAME_LEN, nil + case "CLI_SCROLL_CONCURRENCY": return TGetInfoType_CLI_SCROLL_CONCURRENCY, nil + case "CLI_TXN_CAPABLE": return TGetInfoType_CLI_TXN_CAPABLE, nil + case "CLI_USER_NAME": return TGetInfoType_CLI_USER_NAME, nil + case "CLI_TXN_ISOLATION_OPTION": return TGetInfoType_CLI_TXN_ISOLATION_OPTION, nil + case "CLI_INTEGRITY": return TGetInfoType_CLI_INTEGRITY, nil + case "CLI_GETDATA_EXTENSIONS": return TGetInfoType_CLI_GETDATA_EXTENSIONS, nil + case "CLI_NULL_COLLATION": return TGetInfoType_CLI_NULL_COLLATION, nil + case "CLI_ALTER_TABLE": return TGetInfoType_CLI_ALTER_TABLE, nil + case "CLI_ORDER_BY_COLUMNS_IN_SELECT": return TGetInfoType_CLI_ORDER_BY_COLUMNS_IN_SELECT, nil + case "CLI_SPECIAL_CHARACTERS": return TGetInfoType_CLI_SPECIAL_CHARACTERS, nil + case "CLI_MAX_COLUMNS_IN_GROUP_BY": return TGetInfoType_CLI_MAX_COLUMNS_IN_GROUP_BY, nil + case "CLI_MAX_COLUMNS_IN_INDEX": return TGetInfoType_CLI_MAX_COLUMNS_IN_INDEX, nil + case "CLI_MAX_COLUMNS_IN_ORDER_BY": return TGetInfoType_CLI_MAX_COLUMNS_IN_ORDER_BY, nil + case "CLI_MAX_COLUMNS_IN_SELECT": return TGetInfoType_CLI_MAX_COLUMNS_IN_SELECT, nil + case "CLI_MAX_COLUMNS_IN_TABLE": return TGetInfoType_CLI_MAX_COLUMNS_IN_TABLE, nil + case "CLI_MAX_INDEX_SIZE": return TGetInfoType_CLI_MAX_INDEX_SIZE, nil + case "CLI_MAX_ROW_SIZE": return TGetInfoType_CLI_MAX_ROW_SIZE, nil + case "CLI_MAX_STATEMENT_LEN": return TGetInfoType_CLI_MAX_STATEMENT_LEN, nil + case "CLI_MAX_TABLES_IN_SELECT": return TGetInfoType_CLI_MAX_TABLES_IN_SELECT, nil + case "CLI_MAX_USER_NAME_LEN": return TGetInfoType_CLI_MAX_USER_NAME_LEN, nil + case "CLI_OJ_CAPABILITIES": return TGetInfoType_CLI_OJ_CAPABILITIES, nil + case "CLI_XOPEN_CLI_YEAR": return TGetInfoType_CLI_XOPEN_CLI_YEAR, nil + case "CLI_CURSOR_SENSITIVITY": return TGetInfoType_CLI_CURSOR_SENSITIVITY, nil + case "CLI_DESCRIBE_PARAMETER": return TGetInfoType_CLI_DESCRIBE_PARAMETER, nil + case "CLI_CATALOG_NAME": return TGetInfoType_CLI_CATALOG_NAME, nil + case "CLI_COLLATION_SEQ": return TGetInfoType_CLI_COLLATION_SEQ, nil + case "CLI_MAX_IDENTIFIER_LEN": return TGetInfoType_CLI_MAX_IDENTIFIER_LEN, nil + } + return TGetInfoType(0), fmt.Errorf("not a valid TGetInfoType string") +} + + +func TGetInfoTypePtr(v TGetInfoType) *TGetInfoType { return &v } + +func (p TGetInfoType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TGetInfoType) UnmarshalText(text []byte) error { +q, err := TGetInfoTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TGetInfoType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TGetInfoType(v) +return nil +} + +func (p * TGetInfoType) Value() (driver.Value, error) { + 
if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TResultPersistenceMode int64 +const ( + TResultPersistenceMode_ONLY_LARGE_RESULTS TResultPersistenceMode = 0 + TResultPersistenceMode_ALL_QUERY_RESULTS TResultPersistenceMode = 1 + TResultPersistenceMode_ALL_RESULTS TResultPersistenceMode = 2 +) + +func (p TResultPersistenceMode) String() string { + switch p { + case TResultPersistenceMode_ONLY_LARGE_RESULTS: return "ONLY_LARGE_RESULTS" + case TResultPersistenceMode_ALL_QUERY_RESULTS: return "ALL_QUERY_RESULTS" + case TResultPersistenceMode_ALL_RESULTS: return "ALL_RESULTS" + } + return "" +} + +func TResultPersistenceModeFromString(s string) (TResultPersistenceMode, error) { + switch s { + case "ONLY_LARGE_RESULTS": return TResultPersistenceMode_ONLY_LARGE_RESULTS, nil + case "ALL_QUERY_RESULTS": return TResultPersistenceMode_ALL_QUERY_RESULTS, nil + case "ALL_RESULTS": return TResultPersistenceMode_ALL_RESULTS, nil + } + return TResultPersistenceMode(0), fmt.Errorf("not a valid TResultPersistenceMode string") +} + + +func TResultPersistenceModePtr(v TResultPersistenceMode) *TResultPersistenceMode { return &v } + +func (p TResultPersistenceMode) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TResultPersistenceMode) UnmarshalText(text []byte) error { +q, err := TResultPersistenceModeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TResultPersistenceMode) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TResultPersistenceMode(v) +return nil +} + +func (p * TResultPersistenceMode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TDBSqlCloseOperationReason int64 +const ( + TDBSqlCloseOperationReason_NONE TDBSqlCloseOperationReason = 0 + TDBSqlCloseOperationReason_COMMAND_INACTIVITY_TIMEOUT TDBSqlCloseOperationReason = 1 +) + +func (p TDBSqlCloseOperationReason) String() string { + switch p { + case TDBSqlCloseOperationReason_NONE: return "NONE" + case TDBSqlCloseOperationReason_COMMAND_INACTIVITY_TIMEOUT: return "COMMAND_INACTIVITY_TIMEOUT" + } + return "" +} + +func TDBSqlCloseOperationReasonFromString(s string) (TDBSqlCloseOperationReason, error) { + switch s { + case "NONE": return TDBSqlCloseOperationReason_NONE, nil + case "COMMAND_INACTIVITY_TIMEOUT": return TDBSqlCloseOperationReason_COMMAND_INACTIVITY_TIMEOUT, nil + } + return TDBSqlCloseOperationReason(0), fmt.Errorf("not a valid TDBSqlCloseOperationReason string") +} + + +func TDBSqlCloseOperationReasonPtr(v TDBSqlCloseOperationReason) *TDBSqlCloseOperationReason { return &v } + +func (p TDBSqlCloseOperationReason) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TDBSqlCloseOperationReason) UnmarshalText(text []byte) error { +q, err := TDBSqlCloseOperationReasonFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TDBSqlCloseOperationReason) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TDBSqlCloseOperationReason(v) +return nil +} + +func (p * TDBSqlCloseOperationReason) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TCacheLookupResult_ int64 +const ( + TCacheLookupResult__CACHE_INELIGIBLE TCacheLookupResult_ = 0 + TCacheLookupResult__LOCAL_CACHE_HIT TCacheLookupResult_ = 1 + TCacheLookupResult__REMOTE_CACHE_HIT 
TCacheLookupResult_ = 2 + TCacheLookupResult__CACHE_MISS TCacheLookupResult_ = 3 +) + +func (p TCacheLookupResult_) String() string { + switch p { + case TCacheLookupResult__CACHE_INELIGIBLE: return "CACHE_INELIGIBLE" + case TCacheLookupResult__LOCAL_CACHE_HIT: return "LOCAL_CACHE_HIT" + case TCacheLookupResult__REMOTE_CACHE_HIT: return "REMOTE_CACHE_HIT" + case TCacheLookupResult__CACHE_MISS: return "CACHE_MISS" + } + return "" +} + +func TCacheLookupResult_FromString(s string) (TCacheLookupResult_, error) { + switch s { + case "CACHE_INELIGIBLE": return TCacheLookupResult__CACHE_INELIGIBLE, nil + case "LOCAL_CACHE_HIT": return TCacheLookupResult__LOCAL_CACHE_HIT, nil + case "REMOTE_CACHE_HIT": return TCacheLookupResult__REMOTE_CACHE_HIT, nil + case "CACHE_MISS": return TCacheLookupResult__CACHE_MISS, nil + } + return TCacheLookupResult_(0), fmt.Errorf("not a valid TCacheLookupResult_ string") +} + + +func TCacheLookupResult_Ptr(v TCacheLookupResult_) *TCacheLookupResult_ { return &v } + +func (p TCacheLookupResult_) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TCacheLookupResult_) UnmarshalText(text []byte) error { +q, err := TCacheLookupResult_FromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TCacheLookupResult_) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TCacheLookupResult_(v) +return nil +} + +func (p * TCacheLookupResult_) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TCloudFetchDisabledReason int64 +const ( + TCloudFetchDisabledReason_ARROW_SUPPORT TCloudFetchDisabledReason = 0 + TCloudFetchDisabledReason_CLOUD_FETCH_SUPPORT TCloudFetchDisabledReason = 1 + TCloudFetchDisabledReason_PROTOCOL_VERSION TCloudFetchDisabledReason = 2 + TCloudFetchDisabledReason_REGION_SUPPORT TCloudFetchDisabledReason = 3 + TCloudFetchDisabledReason_BLOCKLISTED_OPERATION TCloudFetchDisabledReason = 4 + TCloudFetchDisabledReason_SMALL_RESULT_SIZE TCloudFetchDisabledReason = 5 + TCloudFetchDisabledReason_CUSTOMER_STORAGE_SUPPORT TCloudFetchDisabledReason = 6 + TCloudFetchDisabledReason_UNKNOWN TCloudFetchDisabledReason = 7 +) + +func (p TCloudFetchDisabledReason) String() string { + switch p { + case TCloudFetchDisabledReason_ARROW_SUPPORT: return "ARROW_SUPPORT" + case TCloudFetchDisabledReason_CLOUD_FETCH_SUPPORT: return "CLOUD_FETCH_SUPPORT" + case TCloudFetchDisabledReason_PROTOCOL_VERSION: return "PROTOCOL_VERSION" + case TCloudFetchDisabledReason_REGION_SUPPORT: return "REGION_SUPPORT" + case TCloudFetchDisabledReason_BLOCKLISTED_OPERATION: return "BLOCKLISTED_OPERATION" + case TCloudFetchDisabledReason_SMALL_RESULT_SIZE: return "SMALL_RESULT_SIZE" + case TCloudFetchDisabledReason_CUSTOMER_STORAGE_SUPPORT: return "CUSTOMER_STORAGE_SUPPORT" + case TCloudFetchDisabledReason_UNKNOWN: return "UNKNOWN" + } + return "" +} + +func TCloudFetchDisabledReasonFromString(s string) (TCloudFetchDisabledReason, error) { + switch s { + case "ARROW_SUPPORT": return TCloudFetchDisabledReason_ARROW_SUPPORT, nil + case "CLOUD_FETCH_SUPPORT": return TCloudFetchDisabledReason_CLOUD_FETCH_SUPPORT, nil + case "PROTOCOL_VERSION": return TCloudFetchDisabledReason_PROTOCOL_VERSION, nil + case "REGION_SUPPORT": return TCloudFetchDisabledReason_REGION_SUPPORT, nil + case "BLOCKLISTED_OPERATION": return TCloudFetchDisabledReason_BLOCKLISTED_OPERATION, nil + case "SMALL_RESULT_SIZE": return 
TCloudFetchDisabledReason_SMALL_RESULT_SIZE, nil + case "CUSTOMER_STORAGE_SUPPORT": return TCloudFetchDisabledReason_CUSTOMER_STORAGE_SUPPORT, nil + case "UNKNOWN": return TCloudFetchDisabledReason_UNKNOWN, nil + } + return TCloudFetchDisabledReason(0), fmt.Errorf("not a valid TCloudFetchDisabledReason string") +} + + +func TCloudFetchDisabledReasonPtr(v TCloudFetchDisabledReason) *TCloudFetchDisabledReason { return &v } + +func (p TCloudFetchDisabledReason) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TCloudFetchDisabledReason) UnmarshalText(text []byte) error { +q, err := TCloudFetchDisabledReasonFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TCloudFetchDisabledReason) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TCloudFetchDisabledReason(v) +return nil +} + +func (p * TCloudFetchDisabledReason) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TDBSqlManifestFileFormat int64 +const ( + TDBSqlManifestFileFormat_THRIFT_GET_RESULT_SET_METADATA_RESP TDBSqlManifestFileFormat = 0 +) + +func (p TDBSqlManifestFileFormat) String() string { + switch p { + case TDBSqlManifestFileFormat_THRIFT_GET_RESULT_SET_METADATA_RESP: return "THRIFT_GET_RESULT_SET_METADATA_RESP" + } + return "" +} + +func TDBSqlManifestFileFormatFromString(s string) (TDBSqlManifestFileFormat, error) { + switch s { + case "THRIFT_GET_RESULT_SET_METADATA_RESP": return TDBSqlManifestFileFormat_THRIFT_GET_RESULT_SET_METADATA_RESP, nil + } + return TDBSqlManifestFileFormat(0), fmt.Errorf("not a valid TDBSqlManifestFileFormat string") +} + + +func TDBSqlManifestFileFormatPtr(v TDBSqlManifestFileFormat) *TDBSqlManifestFileFormat { return &v } + +func (p TDBSqlManifestFileFormat) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TDBSqlManifestFileFormat) UnmarshalText(text []byte) error { +q, err := TDBSqlManifestFileFormatFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TDBSqlManifestFileFormat) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TDBSqlManifestFileFormat(v) +return nil +} + +func (p * TDBSqlManifestFileFormat) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TFetchOrientation int64 +const ( + TFetchOrientation_FETCH_NEXT TFetchOrientation = 0 + TFetchOrientation_FETCH_PRIOR TFetchOrientation = 1 + TFetchOrientation_FETCH_RELATIVE TFetchOrientation = 2 + TFetchOrientation_FETCH_ABSOLUTE TFetchOrientation = 3 + TFetchOrientation_FETCH_FIRST TFetchOrientation = 4 + TFetchOrientation_FETCH_LAST TFetchOrientation = 5 +) + +func (p TFetchOrientation) String() string { + switch p { + case TFetchOrientation_FETCH_NEXT: return "FETCH_NEXT" + case TFetchOrientation_FETCH_PRIOR: return "FETCH_PRIOR" + case TFetchOrientation_FETCH_RELATIVE: return "FETCH_RELATIVE" + case TFetchOrientation_FETCH_ABSOLUTE: return "FETCH_ABSOLUTE" + case TFetchOrientation_FETCH_FIRST: return "FETCH_FIRST" + case TFetchOrientation_FETCH_LAST: return "FETCH_LAST" + } + return "" +} + +func TFetchOrientationFromString(s string) (TFetchOrientation, error) { + switch s { + case "FETCH_NEXT": return TFetchOrientation_FETCH_NEXT, nil + case "FETCH_PRIOR": return TFetchOrientation_FETCH_PRIOR, nil + case "FETCH_RELATIVE": return 
TFetchOrientation_FETCH_RELATIVE, nil + case "FETCH_ABSOLUTE": return TFetchOrientation_FETCH_ABSOLUTE, nil + case "FETCH_FIRST": return TFetchOrientation_FETCH_FIRST, nil + case "FETCH_LAST": return TFetchOrientation_FETCH_LAST, nil + } + return TFetchOrientation(0), fmt.Errorf("not a valid TFetchOrientation string") +} + + +func TFetchOrientationPtr(v TFetchOrientation) *TFetchOrientation { return &v } + +func (p TFetchOrientation) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TFetchOrientation) UnmarshalText(text []byte) error { +q, err := TFetchOrientationFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TFetchOrientation) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TFetchOrientation(v) +return nil +} + +func (p * TFetchOrientation) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TDBSqlFetchDisposition int64 +const ( + TDBSqlFetchDisposition_DISPOSITION_UNSPECIFIED TDBSqlFetchDisposition = 0 + TDBSqlFetchDisposition_DISPOSITION_INLINE TDBSqlFetchDisposition = 1 + TDBSqlFetchDisposition_DISPOSITION_EXTERNAL_LINKS TDBSqlFetchDisposition = 2 + TDBSqlFetchDisposition_DISPOSITION_INTERNAL_DBFS TDBSqlFetchDisposition = 3 +) + +func (p TDBSqlFetchDisposition) String() string { + switch p { + case TDBSqlFetchDisposition_DISPOSITION_UNSPECIFIED: return "DISPOSITION_UNSPECIFIED" + case TDBSqlFetchDisposition_DISPOSITION_INLINE: return "DISPOSITION_INLINE" + case TDBSqlFetchDisposition_DISPOSITION_EXTERNAL_LINKS: return "DISPOSITION_EXTERNAL_LINKS" + case TDBSqlFetchDisposition_DISPOSITION_INTERNAL_DBFS: return "DISPOSITION_INTERNAL_DBFS" + } + return "" +} + +func TDBSqlFetchDispositionFromString(s string) (TDBSqlFetchDisposition, error) { + switch s { + case "DISPOSITION_UNSPECIFIED": return TDBSqlFetchDisposition_DISPOSITION_UNSPECIFIED, nil + case "DISPOSITION_INLINE": return TDBSqlFetchDisposition_DISPOSITION_INLINE, nil + case "DISPOSITION_EXTERNAL_LINKS": return TDBSqlFetchDisposition_DISPOSITION_EXTERNAL_LINKS, nil + case "DISPOSITION_INTERNAL_DBFS": return TDBSqlFetchDisposition_DISPOSITION_INTERNAL_DBFS, nil + } + return TDBSqlFetchDisposition(0), fmt.Errorf("not a valid TDBSqlFetchDisposition string") +} + + +func TDBSqlFetchDispositionPtr(v TDBSqlFetchDisposition) *TDBSqlFetchDisposition { return &v } + +func (p TDBSqlFetchDisposition) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TDBSqlFetchDisposition) UnmarshalText(text []byte) error { +q, err := TDBSqlFetchDispositionFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TDBSqlFetchDisposition) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TDBSqlFetchDisposition(v) +return nil +} + +func (p * TDBSqlFetchDisposition) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TJobExecutionStatus int64 +const ( + TJobExecutionStatus_IN_PROGRESS TJobExecutionStatus = 0 + TJobExecutionStatus_COMPLETE TJobExecutionStatus = 1 + TJobExecutionStatus_NOT_AVAILABLE TJobExecutionStatus = 2 +) + +func (p TJobExecutionStatus) String() string { + switch p { + case TJobExecutionStatus_IN_PROGRESS: return "IN_PROGRESS" + case TJobExecutionStatus_COMPLETE: return "COMPLETE" + case TJobExecutionStatus_NOT_AVAILABLE: return "NOT_AVAILABLE" + } + return "" +} + 
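+// Every enum in this generated Thrift file repeats the same boilerplate:
+// FromString is the inverse of String (unknown names return an error),
+// MarshalText/UnmarshalText round-trip the enum as its symbolic name, and
+// Scan/Value exchange it as an int64, satisfying sql.Scanner and
+// driver.Valuer. A minimal usage sketch (illustrative only):
+//
+//	var s TJobExecutionStatus
+//	_ = s.UnmarshalText([]byte("COMPLETE")) // s == TJobExecutionStatus_COMPLETE
+//	b, _ := s.MarshalText()                 // b == []byte("COMPLETE")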
+func TJobExecutionStatusFromString(s string) (TJobExecutionStatus, error) { + switch s { + case "IN_PROGRESS": return TJobExecutionStatus_IN_PROGRESS, nil + case "COMPLETE": return TJobExecutionStatus_COMPLETE, nil + case "NOT_AVAILABLE": return TJobExecutionStatus_NOT_AVAILABLE, nil + } + return TJobExecutionStatus(0), fmt.Errorf("not a valid TJobExecutionStatus string") +} + + +func TJobExecutionStatusPtr(v TJobExecutionStatus) *TJobExecutionStatus { return &v } + +func (p TJobExecutionStatus) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TJobExecutionStatus) UnmarshalText(text []byte) error { +q, err := TJobExecutionStatusFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TJobExecutionStatus) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TJobExecutionStatus(v) +return nil +} + +func (p * TJobExecutionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type TTypeEntryPtr int32 + +func TTypeEntryPtrPtr(v TTypeEntryPtr) *TTypeEntryPtr { return &v } + +type TIdentifier string + +func TIdentifierPtr(v TIdentifier) *TIdentifier { return &v } + +type TPattern string + +func TPatternPtr(v TPattern) *TPattern { return &v } + +type TPatternOrIdentifier string + +func TPatternOrIdentifierPtr(v TPatternOrIdentifier) *TPatternOrIdentifier { return &v } + +type TSparkParameterList []*TSparkParameter + +func TSparkParameterListPtr(v TSparkParameterList) *TSparkParameterList { return &v } + +// Attributes: +// - I32Value +// - StringValue +type TTypeQualifierValue struct { + I32Value *int32 `thrift:"i32Value,1" db:"i32Value" json:"i32Value,omitempty"` + StringValue *string `thrift:"stringValue,2" db:"stringValue" json:"stringValue,omitempty"` +} + +func NewTTypeQualifierValue() *TTypeQualifierValue { + return &TTypeQualifierValue{} +} + +var TTypeQualifierValue_I32Value_DEFAULT int32 +func (p *TTypeQualifierValue) GetI32Value() int32 { + if !p.IsSetI32Value() { + return TTypeQualifierValue_I32Value_DEFAULT + } +return *p.I32Value +} +var TTypeQualifierValue_StringValue_DEFAULT string +func (p *TTypeQualifierValue) GetStringValue() string { + if !p.IsSetStringValue() { + return TTypeQualifierValue_StringValue_DEFAULT + } +return *p.StringValue +} +func (p *TTypeQualifierValue) CountSetFieldsTTypeQualifierValue() int { + count := 0 + if (p.IsSetI32Value()) { + count++ + } + if (p.IsSetStringValue()) { + count++ + } + return count + +} + +func (p *TTypeQualifierValue) IsSetI32Value() bool { + return p.I32Value != nil +} + +func (p *TTypeQualifierValue) IsSetStringValue() bool { + return p.StringValue != nil +} + +func (p *TTypeQualifierValue) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TTypeQualifierValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.I32Value = &v +} + return nil +} + +func (p *TTypeQualifierValue) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.StringValue = &v +} + return nil +} + +func (p *TTypeQualifierValue) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsTTypeQualifierValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "TTypeQualifierValue"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TTypeQualifierValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI32Value() { + if err := oprot.WriteFieldBegin(ctx, "i32Value", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:i32Value: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.I32Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.i32Value (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:i32Value: ", p), err) } + } + return err +} + +func (p *TTypeQualifierValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStringValue() { + if err := oprot.WriteFieldBegin(ctx, "stringValue", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stringValue: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.StringValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.stringValue (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stringValue: ", p), err) } + } + return err +} + +func (p *TTypeQualifierValue) Equals(other *TTypeQualifierValue) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.I32Value != other.I32Value { + if p.I32Value == nil || other.I32Value == nil { + return false + } + if (*p.I32Value) != (*other.I32Value) { return false } + } + if p.StringValue != other.StringValue { + if p.StringValue == nil || other.StringValue == nil { + return false + } + if (*p.StringValue) != (*other.StringValue) { return false } + } + return true +} + +func (p *TTypeQualifierValue) String() string { + if p == 
nil { + return "" + } + return fmt.Sprintf("TTypeQualifierValue(%+v)", *p) +} + +func (p *TTypeQualifierValue) Validate() error { + return nil +} +// Attributes: +// - Qualifiers +type TTypeQualifiers struct { + Qualifiers map[string]*TTypeQualifierValue `thrift:"qualifiers,1,required" db:"qualifiers" json:"qualifiers"` +} + +func NewTTypeQualifiers() *TTypeQualifiers { + return &TTypeQualifiers{} +} + + +func (p *TTypeQualifiers) GetQualifiers() map[string]*TTypeQualifierValue { + return p.Qualifiers +} +func (p *TTypeQualifiers) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetQualifiers bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetQualifiers = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetQualifiers{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Qualifiers is not set")); + } + return nil +} + +func (p *TTypeQualifiers) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]*TTypeQualifierValue, size) + p.Qualifiers = tMap + for i := 0; i < size; i ++ { +var _key0 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key0 = v +} + _val1 := &TTypeQualifierValue{} + if err := _val1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val1), err) + } + p.Qualifiers[_key0] = _val1 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TTypeQualifiers) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TTypeQualifiers"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TTypeQualifiers) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "qualifiers", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:qualifiers: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRUCT, len(p.Qualifiers)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range 
p.Qualifiers { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:qualifiers: ", p), err) } + return err +} + +func (p *TTypeQualifiers) Equals(other *TTypeQualifiers) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Qualifiers) != len(other.Qualifiers) { return false } + for k, _tgt := range p.Qualifiers { + _src2 := other.Qualifiers[k] + if !_tgt.Equals(_src2) { return false } + } + return true +} + +func (p *TTypeQualifiers) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTypeQualifiers(%+v)", *p) +} + +func (p *TTypeQualifiers) Validate() error { + return nil +} +// Attributes: +// - Type +// - TypeQualifiers +type TPrimitiveTypeEntry struct { + Type TTypeId `thrift:"type,1,required" db:"type" json:"type"` + TypeQualifiers *TTypeQualifiers `thrift:"typeQualifiers,2" db:"typeQualifiers" json:"typeQualifiers,omitempty"` +} + +func NewTPrimitiveTypeEntry() *TPrimitiveTypeEntry { + return &TPrimitiveTypeEntry{} +} + + +func (p *TPrimitiveTypeEntry) GetType() TTypeId { + return p.Type +} +var TPrimitiveTypeEntry_TypeQualifiers_DEFAULT *TTypeQualifiers +func (p *TPrimitiveTypeEntry) GetTypeQualifiers() *TTypeQualifiers { + if !p.IsSetTypeQualifiers() { + return TPrimitiveTypeEntry_TypeQualifiers_DEFAULT + } +return p.TypeQualifiers +} +func (p *TPrimitiveTypeEntry) IsSetTypeQualifiers() bool { + return p.TypeQualifiers != nil +} + +func (p *TPrimitiveTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetType bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set")); + } + return nil +} + +func (p *TPrimitiveTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TTypeId(v) + p.Type = temp +} + return nil +} + +func (p *TPrimitiveTypeEntry) 
ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.TypeQualifiers = &TTypeQualifiers{} + if err := p.TypeQualifiers.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TypeQualifiers), err) + } + return nil +} + +func (p *TPrimitiveTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TPrimitiveTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TPrimitiveTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) } + return err +} + +func (p *TPrimitiveTypeEntry) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTypeQualifiers() { + if err := oprot.WriteFieldBegin(ctx, "typeQualifiers", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:typeQualifiers: ", p), err) } + if err := p.TypeQualifiers.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TypeQualifiers), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:typeQualifiers: ", p), err) } + } + return err +} + +func (p *TPrimitiveTypeEntry) Equals(other *TPrimitiveTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Type != other.Type { return false } + if !p.TypeQualifiers.Equals(other.TypeQualifiers) { return false } + return true +} + +func (p *TPrimitiveTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TPrimitiveTypeEntry(%+v)", *p) +} + +func (p *TPrimitiveTypeEntry) Validate() error { + return nil +} +// Attributes: +// - ObjectTypePtr +type TArrayTypeEntry struct { + ObjectTypePtr TTypeEntryPtr `thrift:"objectTypePtr,1,required" db:"objectTypePtr" json:"objectTypePtr"` +} + +func NewTArrayTypeEntry() *TArrayTypeEntry { + return &TArrayTypeEntry{} +} + + +func (p *TArrayTypeEntry) GetObjectTypePtr() TTypeEntryPtr { + return p.ObjectTypePtr +} +func (p *TArrayTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetObjectTypePtr bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if 
err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetObjectTypePtr = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetObjectTypePtr{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ObjectTypePtr is not set")); + } + return nil +} + +func (p *TArrayTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TTypeEntryPtr(v) + p.ObjectTypePtr = temp +} + return nil +} + +func (p *TArrayTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TArrayTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TArrayTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "objectTypePtr", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:objectTypePtr: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.ObjectTypePtr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.objectTypePtr (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:objectTypePtr: ", p), err) } + return err +} + +func (p *TArrayTypeEntry) Equals(other *TArrayTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ObjectTypePtr != other.ObjectTypePtr { return false } + return true +} + +func (p *TArrayTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TArrayTypeEntry(%+v)", *p) +} + +func (p *TArrayTypeEntry) Validate() error { + return nil +} +// Attributes: +// - KeyTypePtr +// - ValueTypePtr +type TMapTypeEntry struct { + KeyTypePtr TTypeEntryPtr `thrift:"keyTypePtr,1,required" db:"keyTypePtr" json:"keyTypePtr"` + ValueTypePtr TTypeEntryPtr `thrift:"valueTypePtr,2,required" db:"valueTypePtr" json:"valueTypePtr"` +} + +func NewTMapTypeEntry() *TMapTypeEntry { + return &TMapTypeEntry{} +} + + +func (p *TMapTypeEntry) GetKeyTypePtr() TTypeEntryPtr { + return p.KeyTypePtr +} + +func (p *TMapTypeEntry) GetValueTypePtr() TTypeEntryPtr { + return p.ValueTypePtr +} +func (p *TMapTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetKeyTypePtr bool = false; + var issetValueTypePtr bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if 
fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetKeyTypePtr = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetValueTypePtr = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetKeyTypePtr{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field KeyTypePtr is not set")); + } + if !issetValueTypePtr{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ValueTypePtr is not set")); + } + return nil +} + +func (p *TMapTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TTypeEntryPtr(v) + p.KeyTypePtr = temp +} + return nil +} + +func (p *TMapTypeEntry) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TTypeEntryPtr(v) + p.ValueTypePtr = temp +} + return nil +} + +func (p *TMapTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TMapTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TMapTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "keyTypePtr", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:keyTypePtr: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.KeyTypePtr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.keyTypePtr (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:keyTypePtr: ", p), err) } + return err +} + +func (p *TMapTypeEntry) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "valueTypePtr", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:valueTypePtr: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.ValueTypePtr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.valueTypePtr (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:valueTypePtr: ", p), err) } + return err +} + +func (p *TMapTypeEntry) 
Equals(other *TMapTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.KeyTypePtr != other.KeyTypePtr { return false } + if p.ValueTypePtr != other.ValueTypePtr { return false } + return true +} + +func (p *TMapTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TMapTypeEntry(%+v)", *p) +} + +func (p *TMapTypeEntry) Validate() error { + return nil +} +// Attributes: +// - NameToTypePtr +type TStructTypeEntry struct { + NameToTypePtr map[string]TTypeEntryPtr `thrift:"nameToTypePtr,1,required" db:"nameToTypePtr" json:"nameToTypePtr"` +} + +func NewTStructTypeEntry() *TStructTypeEntry { + return &TStructTypeEntry{} +} + + +func (p *TStructTypeEntry) GetNameToTypePtr() map[string]TTypeEntryPtr { + return p.NameToTypePtr +} +func (p *TStructTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNameToTypePtr bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNameToTypePtr = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNameToTypePtr{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NameToTypePtr is not set")); + } + return nil +} + +func (p *TStructTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]TTypeEntryPtr, size) + p.NameToTypePtr = tMap + for i := 0; i < size; i ++ { +var _key3 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key3 = v +} +var _val4 TTypeEntryPtr + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + temp := TTypeEntryPtr(v) + _val4 = temp +} + p.NameToTypePtr[_key3] = _val4 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TStructTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TStructTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TStructTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin(ctx, "nameToTypePtr", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:nameToTypePtr: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.I32, len(p.NameToTypePtr)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.NameToTypePtr { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := oprot.WriteI32(ctx, int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:nameToTypePtr: ", p), err) } + return err +} + +func (p *TStructTypeEntry) Equals(other *TStructTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.NameToTypePtr) != len(other.NameToTypePtr) { return false } + for k, _tgt := range p.NameToTypePtr { + _src5 := other.NameToTypePtr[k] + if _tgt != _src5 { return false } + } + return true +} + +func (p *TStructTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStructTypeEntry(%+v)", *p) +} + +func (p *TStructTypeEntry) Validate() error { + return nil +} +// Attributes: +// - NameToTypePtr +type TUnionTypeEntry struct { + NameToTypePtr map[string]TTypeEntryPtr `thrift:"nameToTypePtr,1,required" db:"nameToTypePtr" json:"nameToTypePtr"` +} + +func NewTUnionTypeEntry() *TUnionTypeEntry { + return &TUnionTypeEntry{} +} + + +func (p *TUnionTypeEntry) GetNameToTypePtr() map[string]TTypeEntryPtr { + return p.NameToTypePtr +} +func (p *TUnionTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetNameToTypePtr bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetNameToTypePtr = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetNameToTypePtr{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NameToTypePtr is not set")); + } + return nil +} + +func (p *TUnionTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]TTypeEntryPtr, size) + p.NameToTypePtr = tMap + for i := 0; i < size; i ++ { +var _key6 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key6 = v +} +var _val7 
TTypeEntryPtr + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + temp := TTypeEntryPtr(v) + _val7 = temp +} + p.NameToTypePtr[_key6] = _val7 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TUnionTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TUnionTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TUnionTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nameToTypePtr", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:nameToTypePtr: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.I32, len(p.NameToTypePtr)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.NameToTypePtr { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := oprot.WriteI32(ctx, int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:nameToTypePtr: ", p), err) } + return err +} + +func (p *TUnionTypeEntry) Equals(other *TUnionTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.NameToTypePtr) != len(other.NameToTypePtr) { return false } + for k, _tgt := range p.NameToTypePtr { + _src8 := other.NameToTypePtr[k] + if _tgt != _src8 { return false } + } + return true +} + +func (p *TUnionTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TUnionTypeEntry(%+v)", *p) +} + +func (p *TUnionTypeEntry) Validate() error { + return nil +} +// Attributes: +// - TypeClassName +type TUserDefinedTypeEntry struct { + TypeClassName string `thrift:"typeClassName,1,required" db:"typeClassName" json:"typeClassName"` +} + +func NewTUserDefinedTypeEntry() *TUserDefinedTypeEntry { + return &TUserDefinedTypeEntry{} +} + + +func (p *TUserDefinedTypeEntry) GetTypeClassName() string { + return p.TypeClassName +} +func (p *TUserDefinedTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTypeClassName bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTypeClassName = true + } else { 
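+ // Field 1 (typeClassName) arrived with an unexpected wire type; the
+ // generated reader skips the value rather than failing the whole read.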
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTypeClassName{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TypeClassName is not set")); + } + return nil +} + +func (p *TUserDefinedTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.TypeClassName = v +} + return nil +} + +func (p *TUserDefinedTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TUserDefinedTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TUserDefinedTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "typeClassName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:typeClassName: ", p), err) } + if err := oprot.WriteString(ctx, string(p.TypeClassName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.typeClassName (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:typeClassName: ", p), err) } + return err +} + +func (p *TUserDefinedTypeEntry) Equals(other *TUserDefinedTypeEntry) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TypeClassName != other.TypeClassName { return false } + return true +} + +func (p *TUserDefinedTypeEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TUserDefinedTypeEntry(%+v)", *p) +} + +func (p *TUserDefinedTypeEntry) Validate() error { + return nil +} +// Attributes: +// - PrimitiveEntry +// - ArrayEntry +// - MapEntry +// - StructEntry +// - UnionEntry +// - UserDefinedTypeEntry +type TTypeEntry struct { + PrimitiveEntry *TPrimitiveTypeEntry `thrift:"primitiveEntry,1" db:"primitiveEntry" json:"primitiveEntry,omitempty"` + ArrayEntry *TArrayTypeEntry `thrift:"arrayEntry,2" db:"arrayEntry" json:"arrayEntry,omitempty"` + MapEntry *TMapTypeEntry `thrift:"mapEntry,3" db:"mapEntry" json:"mapEntry,omitempty"` + StructEntry *TStructTypeEntry `thrift:"structEntry,4" db:"structEntry" json:"structEntry,omitempty"` + UnionEntry *TUnionTypeEntry `thrift:"unionEntry,5" db:"unionEntry" json:"unionEntry,omitempty"` + UserDefinedTypeEntry *TUserDefinedTypeEntry `thrift:"userDefinedTypeEntry,6" db:"userDefinedTypeEntry" json:"userDefinedTypeEntry,omitempty"` +} + +func NewTTypeEntry() *TTypeEntry { + return &TTypeEntry{} +} + +var TTypeEntry_PrimitiveEntry_DEFAULT *TPrimitiveTypeEntry +func (p *TTypeEntry) GetPrimitiveEntry() *TPrimitiveTypeEntry { + if !p.IsSetPrimitiveEntry() { + return 
TTypeEntry_PrimitiveEntry_DEFAULT + } +return p.PrimitiveEntry +} +var TTypeEntry_ArrayEntry_DEFAULT *TArrayTypeEntry +func (p *TTypeEntry) GetArrayEntry() *TArrayTypeEntry { + if !p.IsSetArrayEntry() { + return TTypeEntry_ArrayEntry_DEFAULT + } +return p.ArrayEntry +} +var TTypeEntry_MapEntry_DEFAULT *TMapTypeEntry +func (p *TTypeEntry) GetMapEntry() *TMapTypeEntry { + if !p.IsSetMapEntry() { + return TTypeEntry_MapEntry_DEFAULT + } +return p.MapEntry +} +var TTypeEntry_StructEntry_DEFAULT *TStructTypeEntry +func (p *TTypeEntry) GetStructEntry() *TStructTypeEntry { + if !p.IsSetStructEntry() { + return TTypeEntry_StructEntry_DEFAULT + } +return p.StructEntry +} +var TTypeEntry_UnionEntry_DEFAULT *TUnionTypeEntry +func (p *TTypeEntry) GetUnionEntry() *TUnionTypeEntry { + if !p.IsSetUnionEntry() { + return TTypeEntry_UnionEntry_DEFAULT + } +return p.UnionEntry +} +var TTypeEntry_UserDefinedTypeEntry_DEFAULT *TUserDefinedTypeEntry +func (p *TTypeEntry) GetUserDefinedTypeEntry() *TUserDefinedTypeEntry { + if !p.IsSetUserDefinedTypeEntry() { + return TTypeEntry_UserDefinedTypeEntry_DEFAULT + } +return p.UserDefinedTypeEntry +} +func (p *TTypeEntry) CountSetFieldsTTypeEntry() int { + count := 0 + if (p.IsSetPrimitiveEntry()) { + count++ + } + if (p.IsSetArrayEntry()) { + count++ + } + if (p.IsSetMapEntry()) { + count++ + } + if (p.IsSetStructEntry()) { + count++ + } + if (p.IsSetUnionEntry()) { + count++ + } + if (p.IsSetUserDefinedTypeEntry()) { + count++ + } + return count + +} + +func (p *TTypeEntry) IsSetPrimitiveEntry() bool { + return p.PrimitiveEntry != nil +} + +func (p *TTypeEntry) IsSetArrayEntry() bool { + return p.ArrayEntry != nil +} + +func (p *TTypeEntry) IsSetMapEntry() bool { + return p.MapEntry != nil +} + +func (p *TTypeEntry) IsSetStructEntry() bool { + return p.StructEntry != nil +} + +func (p *TTypeEntry) IsSetUnionEntry() bool { + return p.UnionEntry != nil +} + +func (p *TTypeEntry) IsSetUserDefinedTypeEntry() bool { + return p.UserDefinedTypeEntry != nil +} + +func (p *TTypeEntry) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(ctx, iprot); 
err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TTypeEntry) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.PrimitiveEntry = &TPrimitiveTypeEntry{} + if err := p.PrimitiveEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.PrimitiveEntry), err) + } + return nil +} + +func (p *TTypeEntry) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ArrayEntry = &TArrayTypeEntry{} + if err := p.ArrayEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ArrayEntry), err) + } + return nil +} + +func (p *TTypeEntry) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.MapEntry = &TMapTypeEntry{} + if err := p.MapEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MapEntry), err) + } + return nil +} + +func (p *TTypeEntry) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.StructEntry = &TStructTypeEntry{} + if err := p.StructEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StructEntry), err) + } + return nil +} + +func (p *TTypeEntry) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.UnionEntry = &TUnionTypeEntry{} + if err := p.UnionEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UnionEntry), err) + } + return nil +} + +func (p *TTypeEntry) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + p.UserDefinedTypeEntry = &TUserDefinedTypeEntry{} + if err := p.UserDefinedTypeEntry.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UserDefinedTypeEntry), err) + } + return nil +} + +func (p *TTypeEntry) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsTTypeEntry(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "TTypeEntry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TTypeEntry) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetPrimitiveEntry() { + if err := oprot.WriteFieldBegin(ctx, "primitiveEntry", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:primitiveEntry: ", p), 
err) } + if err := p.PrimitiveEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.PrimitiveEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:primitiveEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetArrayEntry() { + if err := oprot.WriteFieldBegin(ctx, "arrayEntry", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:arrayEntry: ", p), err) } + if err := p.ArrayEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ArrayEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:arrayEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetMapEntry() { + if err := oprot.WriteFieldBegin(ctx, "mapEntry", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:mapEntry: ", p), err) } + if err := p.MapEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MapEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:mapEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStructEntry() { + if err := oprot.WriteFieldBegin(ctx, "structEntry", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:structEntry: ", p), err) } + if err := p.StructEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StructEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:structEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUnionEntry() { + if err := oprot.WriteFieldBegin(ctx, "unionEntry", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:unionEntry: ", p), err) } + if err := p.UnionEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UnionEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:unionEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUserDefinedTypeEntry() { + if err := oprot.WriteFieldBegin(ctx, "userDefinedTypeEntry", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:userDefinedTypeEntry: ", p), err) } + if err := p.UserDefinedTypeEntry.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UserDefinedTypeEntry), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:userDefinedTypeEntry: ", p), err) } + } + return err +} + +func (p *TTypeEntry) Equals(other *TTypeEntry) bool { + 
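+ // Union equality: each member's Equals tolerates nil receivers, so unset
+ // optional fields compare safely below.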
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.PrimitiveEntry.Equals(other.PrimitiveEntry) { return false }
+  if !p.ArrayEntry.Equals(other.ArrayEntry) { return false }
+  if !p.MapEntry.Equals(other.MapEntry) { return false }
+  if !p.StructEntry.Equals(other.StructEntry) { return false }
+  if !p.UnionEntry.Equals(other.UnionEntry) { return false }
+  if !p.UserDefinedTypeEntry.Equals(other.UserDefinedTypeEntry) { return false }
+  return true
+}
+
+func (p *TTypeEntry) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TTypeEntry(%+v)", *p)
+}
+
+func (p *TTypeEntry) Validate() error {
+  return nil
+}
+// Attributes:
+// - Types
+type TTypeDesc struct {
+  Types []*TTypeEntry `thrift:"types,1,required" db:"types" json:"types"`
+}
+
+func NewTTypeDesc() *TTypeDesc {
+  return &TTypeDesc{}
+}
+
+
+func (p *TTypeDesc) GetTypes() []*TTypeEntry {
+  return p.Types
+}
+func (p *TTypeDesc) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetTypes bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetTypes = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetTypes{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Types is not set"));
+  }
+  return nil
+}
+
+func (p *TTypeDesc) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TTypeEntry, 0, size)
+  p.Types = tSlice
+  for i := 0; i < size; i ++ {
+    _elem9 := &TTypeEntry{}
+    if err := _elem9.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+    }
+    p.Types = append(p.Types, _elem9)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TTypeDesc) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TTypeDesc"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TTypeDesc) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "types", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:types: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Types)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Types {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:types: ", p), err) }
+  return err
+}
+
+func (p *TTypeDesc) Equals(other *TTypeDesc) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if len(p.Types) != len(other.Types) { return false }
+  for i, _tgt := range p.Types {
+    _src10 := other.Types[i]
+    if !_tgt.Equals(_src10) { return false }
+  }
+  return true
+}
+
+func (p *TTypeDesc) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TTypeDesc(%+v)", *p)
+}
+
+func (p *TTypeDesc) Validate() error {
+  return nil
+}
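TTypeEntry is generated from a thrift union over the kinds of type (primitive, array, map, struct, union, user-defined), and TTypeDesc flattens a possibly nested column type into a list of such entries, following HiveServer2's TCLIService model. A minimal sketch of describing a STRING column; TPrimitiveTypeEntry and the TTypeId_STRING_TYPE constant are generated earlier in this file, and their exact names are assumed here:

    // Sketch only: assumes the TPrimitiveTypeEntry struct and TTypeId enum
    // constants generated earlier in this file.
    stringType := &TTypeDesc{
        Types: []*TTypeEntry{
            {PrimitiveEntry: &TPrimitiveTypeEntry{Type: TTypeId_STRING_TYPE}},
        },
    }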
+// Attributes:
+// - ColumnName
+// - TypeDesc
+// - Position
+// - Comment
+type TColumnDesc struct {
+  ColumnName string `thrift:"columnName,1,required" db:"columnName" json:"columnName"`
+  TypeDesc *TTypeDesc `thrift:"typeDesc,2,required" db:"typeDesc" json:"typeDesc"`
+  Position int32 `thrift:"position,3,required" db:"position" json:"position"`
+  Comment *string `thrift:"comment,4" db:"comment" json:"comment,omitempty"`
+}
+
+func NewTColumnDesc() *TColumnDesc {
+  return &TColumnDesc{}
+}
+
+
+func (p *TColumnDesc) GetColumnName() string {
+  return p.ColumnName
+}
+var TColumnDesc_TypeDesc_DEFAULT *TTypeDesc
+func (p *TColumnDesc) GetTypeDesc() *TTypeDesc {
+  if !p.IsSetTypeDesc() {
+    return TColumnDesc_TypeDesc_DEFAULT
+  }
+  return p.TypeDesc
+}
+
+func (p *TColumnDesc) GetPosition() int32 {
+  return p.Position
+}
+var TColumnDesc_Comment_DEFAULT string
+func (p *TColumnDesc) GetComment() string {
+  if !p.IsSetComment() {
+    return TColumnDesc_Comment_DEFAULT
+  }
+  return *p.Comment
+}
+func (p *TColumnDesc) IsSetTypeDesc() bool {
+  return p.TypeDesc != nil
+}
+
+func (p *TColumnDesc) IsSetComment() bool {
+  return p.Comment != nil
+}
+
+func (p *TColumnDesc) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetColumnName bool = false;
+  var issetTypeDesc bool = false;
+  var issetPosition bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetColumnName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTypeDesc = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetPosition = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetColumnName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColumnName is not set"));
+  }
+  if !issetTypeDesc{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TypeDesc is not set"));
+  }
+  if !issetPosition{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Position is not set"));
+  }
+  return nil
+}
+
+func (p *TColumnDesc) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.ColumnName = v
+  }
+  return nil
+}
+
+func (p *TColumnDesc) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.TypeDesc = &TTypeDesc{}
+  if err := p.TypeDesc.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TypeDesc), err)
+  }
+  return nil
+}
+
+func (p *TColumnDesc) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.Position = v
+  }
+  return nil
+}
+
+func (p *TColumnDesc) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.Comment = &v
+  }
+  return nil
+}
+
+func (p *TColumnDesc) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TColumnDesc"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TColumnDesc) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "columnName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columnName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ColumnName)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.columnName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columnName: ", p), err) }
+  return err
+}
+
+func (p *TColumnDesc) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "typeDesc", thrift.STRUCT, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:typeDesc: ", p), err) }
+  if err := p.TypeDesc.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TypeDesc), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:typeDesc: ", p), err) }
+  return err
+}
+
+func (p *TColumnDesc) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "position", thrift.I32, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:position: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.Position)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.position (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:position: ", p), err) }
+  return err
+}
+
+func (p *TColumnDesc) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetComment() {
+    if err := oprot.WriteFieldBegin(ctx, "comment", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:comment: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.Comment)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.comment (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:comment: ", p), err) }
+  }
+  return err
+}
+
+func (p *TColumnDesc) Equals(other *TColumnDesc) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.ColumnName != other.ColumnName { return false }
+  if !p.TypeDesc.Equals(other.TypeDesc) { return false }
+  if p.Position != other.Position { return false }
+  if p.Comment != other.Comment {
+    if p.Comment == nil || other.Comment == nil {
+      return false
+    }
+    if (*p.Comment) != (*other.Comment) { return false }
+  }
+  return true
+}
+
+func (p *TColumnDesc) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TColumnDesc(%+v)", *p)
+}
+
+func (p *TColumnDesc) Validate() error {
+  return nil
+}
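In TColumnDesc the required fields (columnName, typeDesc, position) are plain values, while the optional comment is a *string: nil means unset, and writeField4 above skips the field entirely on the wire. A construction sketch, reusing the hypothetical stringType from the earlier sketch:

    comment := "partition column"
    col := &TColumnDesc{
        ColumnName: "usage_date",
        TypeDesc:   stringType, // from the TTypeDesc sketch above
        Position:   1,
        Comment:    &comment, // optional: leave nil to omit field 4 entirely
    }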
fmt.Errorf("Required field Columns is not set")); + } + return nil +} + +func (p *TTableSchema) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*TColumnDesc, 0, size) + p.Columns = tSlice + for i := 0; i < size; i ++ { + _elem11 := &TColumnDesc{} + if err := _elem11.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) + } + p.Columns = append(p.Columns, _elem11) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TTableSchema) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TTableSchema"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TTableSchema) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "columns", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columns: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Columns)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Columns { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columns: ", p), err) } + return err +} + +func (p *TTableSchema) Equals(other *TTableSchema) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Columns) != len(other.Columns) { return false } + for i, _tgt := range p.Columns { + _src12 := other.Columns[i] + if !_tgt.Equals(_src12) { return false } + } + return true +} + +func (p *TTableSchema) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTableSchema(%+v)", *p) +} + +func (p *TTableSchema) Validate() error { + return nil +} +// Attributes: +// - Value +type TBoolValue struct { + Value *bool `thrift:"value,1" db:"value" json:"value,omitempty"` +} + +func NewTBoolValue() *TBoolValue { + return &TBoolValue{} +} + +var TBoolValue_Value_DEFAULT bool +func (p *TBoolValue) GetValue() bool { + if !p.IsSetValue() { + return TBoolValue_Value_DEFAULT + } +return *p.Value +} +func (p *TBoolValue) IsSetValue() bool { + return p.Value != nil +} + +func (p *TBoolValue) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId 
+// Attributes:
+// - Value
+type TBoolValue struct {
+  Value *bool `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTBoolValue() *TBoolValue {
+  return &TBoolValue{}
+}
+
+var TBoolValue_Value_DEFAULT bool
+func (p *TBoolValue) GetValue() bool {
+  if !p.IsSetValue() {
+    return TBoolValue_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TBoolValue) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TBoolValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TBoolValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Value = &v
+  }
+  return nil
+}
+
+func (p *TBoolValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TBoolValue"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TBoolValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.BOOL, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TBoolValue) Equals(other *TBoolValue) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TBoolValue) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TBoolValue(%+v)", *p)
+}
+
+func (p *TBoolValue) Validate() error {
+  return nil
+}
+// Attributes:
+// - Value
+type TByteValue struct {
+  Value *int8 `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTByteValue() *TByteValue {
+  return &TByteValue{}
+}
+
+var TByteValue_Value_DEFAULT int8
+func (p *TByteValue) GetValue() int8 {
+  if !p.IsSetValue() {
+    return TByteValue_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TByteValue) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TByteValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BYTE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TByteValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadByte(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    temp := int8(v)
+    p.Value = &temp
+  }
+  return nil
+}
+
+func (p *TByteValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TByteValue"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TByteValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.BYTE, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteByte(ctx, int8(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TByteValue) Equals(other *TByteValue) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TByteValue) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TByteValue(%+v)", *p)
+}
+
+func (p *TByteValue) Validate() error {
+  return nil
+}
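These scalar wrappers (TBoolValue, TByteValue, and the TI16/TI32/TI64/TDouble/TString siblings that follow) exist so an optional value can distinguish "unset" from the zero value: the payload is a pointer, IsSetValue checks it for nil, and GetValue falls back to the type default. For instance:

    v := NewTBoolValue()
    fmt.Println(v.IsSetValue(), v.GetValue()) // false false (default, not a wire value)

    b := true
    v.Value = &b
    fmt.Println(v.IsSetValue(), v.GetValue()) // true true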
+// Attributes:
+// - Value
+type TI16Value struct {
+  Value *int16 `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTI16Value() *TI16Value {
+  return &TI16Value{}
+}
+
+var TI16Value_Value_DEFAULT int16
+func (p *TI16Value) GetValue() int16 {
+  if !p.IsSetValue() {
+    return TI16Value_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TI16Value) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TI16Value) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TI16Value) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Value = &v
+  }
+  return nil
+}
+
+func (p *TI16Value) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TI16Value"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TI16Value) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.I16, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteI16(ctx, int16(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TI16Value) Equals(other *TI16Value) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TI16Value) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TI16Value(%+v)", *p)
+}
+
+func (p *TI16Value) Validate() error {
+  return nil
+}
+// Attributes:
+// - Value
+type TI32Value struct {
+  Value *int32 `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTI32Value() *TI32Value {
+  return &TI32Value{}
+}
+
+var TI32Value_Value_DEFAULT int32
+func (p *TI32Value) GetValue() int32 {
+  if !p.IsSetValue() {
+    return TI32Value_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TI32Value) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TI32Value) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TI32Value) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Value = &v
+  }
+  return nil
+}
+
+func (p *TI32Value) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TI32Value"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TI32Value) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.I32, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TI32Value) Equals(other *TI32Value) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TI32Value) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TI32Value(%+v)", *p)
+}
+
+func (p *TI32Value) Validate() error {
+  return nil
+}
+// Attributes:
+// - Value
+type TI64Value struct {
+  Value *int64 `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTI64Value() *TI64Value {
+  return &TI64Value{}
+}
+
+var TI64Value_Value_DEFAULT int64
+func (p *TI64Value) GetValue() int64 {
+  if !p.IsSetValue() {
+    return TI64Value_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TI64Value) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TI64Value) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TI64Value) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Value = &v
+  }
+  return nil
+}
+
+func (p *TI64Value) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TI64Value"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TI64Value) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.I64, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TI64Value) Equals(other *TI64Value) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TI64Value) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TI64Value(%+v)", *p)
+}
+
+func (p *TI64Value) Validate() error {
+  return nil
+}
+// Attributes:
+// - Value
+type TDoubleValue struct {
+  Value *float64 `thrift:"value,1" db:"value" json:"value,omitempty"`
+}
+
+func NewTDoubleValue() *TDoubleValue {
+  return &TDoubleValue{}
+}
+
+var TDoubleValue_Value_DEFAULT float64
+func (p *TDoubleValue) GetValue() float64 {
+  if !p.IsSetValue() {
+    return TDoubleValue_Value_DEFAULT
+  }
+  return *p.Value
+}
+func (p *TDoubleValue) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TDoubleValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDoubleValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Value = &v
+  }
+  return nil
+}
+
+func (p *TDoubleValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TDoubleValue"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDoubleValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.DOUBLE, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.Value)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDoubleValue) Equals(other *TDoubleValue) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Value != other.Value {
+    if p.Value == nil || other.Value == nil {
+      return false
+    }
+    if (*p.Value) != (*other.Value) { return false }
+  }
+  return true
+}
+
+func (p *TDoubleValue) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDoubleValue(%+v)", *p)
+}
+
+func (p *TDoubleValue) Validate() error {
+  return nil
+}
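Note that Equals on these wrappers compares the pointers first and only then dereferences, so two separately allocated wrappers holding equal payloads still compare equal, while a set value never equals an unset one:

    x, y := 3.14, 3.14
    a := &TDoubleValue{Value: &x}
    b := &TDoubleValue{Value: &y}
    fmt.Println(a.Equals(b))               // true: payloads match despite distinct pointers
    fmt.Println(a.Equals(&TDoubleValue{})) // false: only one side is set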
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) } + } + return err +} + +func (p *TStringValue) Equals(other *TStringValue) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Value != other.Value { + if p.Value == nil || other.Value == nil { + return false + } + if (*p.Value) != (*other.Value) { return false } + } + return true +} + +func (p *TStringValue) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStringValue(%+v)", *p) +} + +func (p *TStringValue) Validate() error { + return nil +} +// Attributes: +// - BoolVal +// - ByteVal +// - I16Val +// - I32Val +// - I64Val +// - DoubleVal +// - StringVal +type TColumnValue struct { + BoolVal *TBoolValue `thrift:"boolVal,1" db:"boolVal" json:"boolVal,omitempty"` + ByteVal *TByteValue `thrift:"byteVal,2" db:"byteVal" json:"byteVal,omitempty"` + I16Val *TI16Value `thrift:"i16Val,3" db:"i16Val" json:"i16Val,omitempty"` + I32Val *TI32Value `thrift:"i32Val,4" db:"i32Val" json:"i32Val,omitempty"` + I64Val *TI64Value `thrift:"i64Val,5" db:"i64Val" json:"i64Val,omitempty"` + DoubleVal *TDoubleValue `thrift:"doubleVal,6" db:"doubleVal" json:"doubleVal,omitempty"` + StringVal *TStringValue `thrift:"stringVal,7" db:"stringVal" json:"stringVal,omitempty"` +} + +func NewTColumnValue() *TColumnValue { + return &TColumnValue{} +} + +var TColumnValue_BoolVal_DEFAULT *TBoolValue +func (p *TColumnValue) GetBoolVal() *TBoolValue { + if !p.IsSetBoolVal() { + return TColumnValue_BoolVal_DEFAULT + } +return p.BoolVal +} +var TColumnValue_ByteVal_DEFAULT *TByteValue +func (p *TColumnValue) GetByteVal() *TByteValue { + if !p.IsSetByteVal() { + return TColumnValue_ByteVal_DEFAULT + } +return p.ByteVal +} +var TColumnValue_I16Val_DEFAULT *TI16Value +func (p *TColumnValue) GetI16Val() *TI16Value { + if !p.IsSetI16Val() { + return TColumnValue_I16Val_DEFAULT + } +return p.I16Val +} +var TColumnValue_I32Val_DEFAULT *TI32Value +func (p *TColumnValue) GetI32Val() *TI32Value { + if !p.IsSetI32Val() { + return TColumnValue_I32Val_DEFAULT + } +return p.I32Val +} +var TColumnValue_I64Val_DEFAULT *TI64Value +func (p *TColumnValue) GetI64Val() *TI64Value { + if !p.IsSetI64Val() { + return TColumnValue_I64Val_DEFAULT + } +return p.I64Val +} +var TColumnValue_DoubleVal_DEFAULT *TDoubleValue +func (p *TColumnValue) GetDoubleVal() *TDoubleValue { + if !p.IsSetDoubleVal() { + return TColumnValue_DoubleVal_DEFAULT + } +return p.DoubleVal +} +var TColumnValue_StringVal_DEFAULT *TStringValue +func (p *TColumnValue) GetStringVal() *TStringValue { + if !p.IsSetStringVal() { + return TColumnValue_StringVal_DEFAULT + } +return p.StringVal +} +func (p *TColumnValue) CountSetFieldsTColumnValue() int { + count := 0 + if (p.IsSetBoolVal()) { + count++ + } + if (p.IsSetByteVal()) { + count++ + } + if (p.IsSetI16Val()) { + count++ + } + if (p.IsSetI32Val()) { + count++ + } + if (p.IsSetI64Val()) { + count++ + } + if (p.IsSetDoubleVal()) { + count++ + } + if (p.IsSetStringVal()) { + count++ + } + return count + +} + +func (p *TColumnValue) IsSetBoolVal() bool { + return p.BoolVal != nil +} + +func (p *TColumnValue) IsSetByteVal() bool { + return p.ByteVal != nil +} + +func (p *TColumnValue) 
IsSetI16Val() bool { + return p.I16Val != nil +} + +func (p *TColumnValue) IsSetI32Val() bool { + return p.I32Val != nil +} + +func (p *TColumnValue) IsSetI64Val() bool { + return p.I64Val != nil +} + +func (p *TColumnValue) IsSetDoubleVal() bool { + return p.DoubleVal != nil +} + +func (p *TColumnValue) IsSetStringVal() bool { + return p.StringVal != nil +} + +func (p *TColumnValue) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TColumnValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.BoolVal = &TBoolValue{} + if err := p.BoolVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BoolVal), err) + } + return nil +} + +func (p *TColumnValue) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ByteVal = &TByteValue{} + if err := p.ByteVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ByteVal), err) + } + return nil +} + +func (p *TColumnValue) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.I16Val = &TI16Value{} + if err := p.I16Val.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I16Val), err) + } + return nil +} + +func (p *TColumnValue) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.I32Val = &TI32Value{} + if err := p.I32Val.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I32Val), err) + } + return nil +} + +func (p *TColumnValue) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.I64Val = &TI64Value{} + if err := p.I64Val.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I64Val), err) + } + return nil +} + +func (p *TColumnValue) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + p.DoubleVal = &TDoubleValue{} + if err := p.DoubleVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DoubleVal), err) + } + return nil +} + +func (p *TColumnValue) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + p.StringVal = &TStringValue{} + if err := p.StringVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StringVal), err) + } + return nil +} + +func (p *TColumnValue) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsTColumnValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "TColumnValue"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TColumnValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetBoolVal() { + if err := oprot.WriteFieldBegin(ctx, "boolVal", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:boolVal: ", p), err) } + if err := p.BoolVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BoolVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:boolVal: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetByteVal() { + if err := oprot.WriteFieldBegin(ctx, "byteVal", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:byteVal: ", p), err) } + if err := p.ByteVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ByteVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:byteVal: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI16Val() { + if err := oprot.WriteFieldBegin(ctx, "i16Val", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:i16Val: ", p), err) } + if 
err := p.I16Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I16Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:i16Val: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI32Val() { + if err := oprot.WriteFieldBegin(ctx, "i32Val", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:i32Val: ", p), err) } + if err := p.I32Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I32Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:i32Val: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI64Val() { + if err := oprot.WriteFieldBegin(ctx, "i64Val", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:i64Val: ", p), err) } + if err := p.I64Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I64Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:i64Val: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDoubleVal() { + if err := oprot.WriteFieldBegin(ctx, "doubleVal", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:doubleVal: ", p), err) } + if err := p.DoubleVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DoubleVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:doubleVal: ", p), err) } + } + return err +} + +func (p *TColumnValue) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStringVal() { + if err := oprot.WriteFieldBegin(ctx, "stringVal", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:stringVal: ", p), err) } + if err := p.StringVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StringVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:stringVal: ", p), err) } + } + return err +} + +func (p *TColumnValue) Equals(other *TColumnValue) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.BoolVal.Equals(other.BoolVal) { return false } + if !p.ByteVal.Equals(other.ByteVal) { return false } + if !p.I16Val.Equals(other.I16Val) { return false } + if !p.I32Val.Equals(other.I32Val) { return false } + if !p.I64Val.Equals(other.I64Val) { return false } + if !p.DoubleVal.Equals(other.DoubleVal) { return false } + if !p.StringVal.Equals(other.StringVal) { return false } + return true +} + +func (p *TColumnValue) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TColumnValue(%+v)", *p) +} + +func (p *TColumnValue) Validate() error { + return nil +} +// Attributes: +// - ColVals +type TRow struct { + ColVals []*TColumnValue 
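TColumnValue is generated from a thrift union, so exactly one of its seven branches may be set at a time; Write enforces this up front via CountSetFieldsTColumnValue rather than trusting the caller. A sketch, with ctx and proto as in the round-trip sketch above:

    n := int64(42)
    cv := &TColumnValue{I64Val: &TI64Value{Value: &n}}
    fmt.Println(cv.CountSetFieldsTColumnValue()) // 1: valid to Write

    empty := &TColumnValue{} // zero branches set
    err := empty.Write(ctx, proto) // "exactly one field must be set (0 set)"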
`thrift:"colVals,1,required" db:"colVals" json:"colVals"` +} + +func NewTRow() *TRow { + return &TRow{} +} + + +func (p *TRow) GetColVals() []*TColumnValue { + return p.ColVals +} +func (p *TRow) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetColVals bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetColVals = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetColVals{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColVals is not set")); + } + return nil +} + +func (p *TRow) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*TColumnValue, 0, size) + p.ColVals = tSlice + for i := 0; i < size; i ++ { + _elem13 := &TColumnValue{} + if err := _elem13.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem13), err) + } + p.ColVals = append(p.ColVals, _elem13) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TRow) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TRow"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TRow) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "colVals", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:colVals: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ColVals)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.ColVals { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:colVals: ", p), err) } + return err +} + +func (p *TRow) Equals(other *TRow) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.ColVals) != 
len(other.ColVals) { return false } + for i, _tgt := range p.ColVals { + _src14 := other.ColVals[i] + if !_tgt.Equals(_src14) { return false } + } + return true +} + +func (p *TRow) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRow(%+v)", *p) +} + +func (p *TRow) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TBoolColumn struct { + Values []bool `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTBoolColumn() *TBoolColumn { + return &TBoolColumn{} +} + + +func (p *TBoolColumn) GetValues() []bool { + return p.Values +} + +func (p *TBoolColumn) GetNulls() []byte { + return p.Nulls +} +func (p *TBoolColumn) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TBoolColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]bool, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem15 bool + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem15 = v +} + p.Values = append(p.Values, _elem15) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TBoolColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TBoolColumn) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TBoolColumn"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := 
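A TRow is then simply one TColumnValue per column, in schema order; this is the older row-oriented result layout, as opposed to the columnar T*Column structs that follow. Building a hypothetical two-column row:

    name, total := "main", int64(7)
    row := &TRow{ColVals: []*TColumnValue{
        {StringVal: &TStringValue{Value: &name}},
        {I64Val: &TI64Value{Value: &total}},
    }}
    fmt.Println(len(row.GetColVals())) // 2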
+// Attributes:
+// - Values
+// - Nulls
+type TBoolColumn struct {
+  Values []bool `thrift:"values,1,required" db:"values" json:"values"`
+  Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"`
+}
+
+func NewTBoolColumn() *TBoolColumn {
+  return &TBoolColumn{}
+}
+
+
+func (p *TBoolColumn) GetValues() []bool {
+  return p.Values
+}
+
+func (p *TBoolColumn) GetNulls() []byte {
+  return p.Nulls
+}
+func (p *TBoolColumn) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetValues bool = false;
+  var issetNulls bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetValues = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetNulls = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetValues{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set"));
+  }
+  if !issetNulls{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set"));
+  }
+  return nil
+}
+
+func (p *TBoolColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]bool, 0, size)
+  p.Values = tSlice
+  for i := 0; i < size; i ++ {
+    var _elem15 bool
+    if v, err := iprot.ReadBool(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+    } else {
+      _elem15 = v
+    }
+    p.Values = append(p.Values, _elem15)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TBoolColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.Nulls = v
+  }
+  return nil
+}
+
+func (p *TBoolColumn) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TBoolColumn"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TBoolColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.BOOL, len(p.Values)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Values {
+    if err := oprot.WriteBool(ctx, bool(v)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) }
+  return err
+}
+
+func (p *TBoolColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) }
+  if err := oprot.WriteBinary(ctx, p.Nulls); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) }
+  return err
+}
+
+func (p *TBoolColumn) Equals(other *TBoolColumn) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if len(p.Values) != len(other.Values) { return false }
+  for i, _tgt := range p.Values {
+    _src16 := other.Values[i]
+    if _tgt != _src16 { return false }
+  }
+  if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false }
+  return true
+}
+
+func (p *TBoolColumn) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TBoolColumn(%+v)", *p)
+}
+
+func (p *TBoolColumn) Validate() error {
+  return nil
+}
+// Attributes:
+// - Values
+// - Nulls
+type TByteColumn struct {
+  Values []int8 `thrift:"values,1,required" db:"values" json:"values"`
+  Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"`
+}
+
+func NewTByteColumn() *TByteColumn {
+  return &TByteColumn{}
+}
+
+
+func (p *TByteColumn) GetValues() []int8 {
+  return p.Values
+}
+
+func (p *TByteColumn) GetNulls() []byte {
+  return p.Nulls
+}
+func (p *TByteColumn) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetValues bool = false;
+  var issetNulls bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetValues = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetNulls = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetValues{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set"));
+  }
+  if !issetNulls{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set"));
+  }
+  return nil
+}
+
+func (p *TByteColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]int8, 0, size)
+  p.Values = tSlice
+  for i := 0; i < size; i ++ {
+    var _elem17 int8
+    if v, err := iprot.ReadByte(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+    } else {
+      temp := int8(v)
+      _elem17 = temp
+    }
+    p.Values = append(p.Values, _elem17)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TByteColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.Nulls = v
+  }
+  return nil
+}
+
+func (p *TByteColumn) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TByteColumn"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TByteColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.BYTE, len(p.Values)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Values {
+    if err := oprot.WriteByte(ctx, int8(v)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) }
+  return err
+}
+
+func (p *TByteColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) }
+  if err := oprot.WriteBinary(ctx, p.Nulls); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) }
+  return err
+}
+
+func (p *TByteColumn) Equals(other *TByteColumn) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if len(p.Values) != len(other.Values) { return false }
+  for i, _tgt := range p.Values {
+    _src18 := other.Values[i]
+    if _tgt != _src18 { return false }
+  }
+  if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false }
+  return true
+}
+
+func (p *TByteColumn) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TByteColumn(%+v)", *p)
+}
+
+func (p *TByteColumn) Validate() error {
+  return nil
+}
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TByteColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TByteColumn) Equals(other *TByteColumn) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src18 := other.Values[i] + if _tgt != _src18 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TByteColumn) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TByteColumn(%+v)", *p) +} + +func (p *TByteColumn) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TI16Column struct { + Values []int16 `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTI16Column() *TI16Column { + return &TI16Column{} +} + + +func (p *TI16Column) GetValues() []int16 { + return p.Values +} + +func (p *TI16Column) GetNulls() []byte { + return p.Nulls +} +func (p *TI16Column) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TI16Column) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { 
+ return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int16, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem19 int16 + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem19 = v +} + p.Values = append(p.Values, _elem19) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TI16Column) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TI16Column) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TI16Column"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TI16Column) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.I16, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteI16(ctx, int16(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TI16Column) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TI16Column) Equals(other *TI16Column) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src20 := other.Values[i] + if _tgt != _src20 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TI16Column) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TI16Column(%+v)", *p) +} + +func (p *TI16Column) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TI32Column struct { + Values []int32 `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTI32Column() *TI32Column { + return &TI32Column{} +} + + +func (p *TI32Column) GetValues() []int32 { + return p.Values +} + +func (p *TI32Column) GetNulls() []byte { + return p.Nulls +} +func (p *TI32Column) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TI32Column) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + 
return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem21 int32 + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem21 = v +} + p.Values = append(p.Values, _elem21) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TI32Column) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TI32Column) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TI32Column"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TI32Column) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.I32, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteI32(ctx, int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TI32Column) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TI32Column) Equals(other *TI32Column) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src22 := other.Values[i] + if _tgt != _src22 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TI32Column) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TI32Column(%+v)", *p) +} + +func (p *TI32Column) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TI64Column struct { + Values []int64 `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTI64Column() *TI64Column { + return &TI64Column{} +} + + +func (p *TI64Column) GetValues() []int64 { + return p.Values +} + +func (p *TI64Column) GetNulls() []byte { + return p.Nulls +} +func (p *TI64Column) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TI64Column) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + 
return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int64, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem23 int64 + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem23 = v +} + p.Values = append(p.Values, _elem23) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TI64Column) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TI64Column) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TI64Column"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TI64Column) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.I64, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteI64(ctx, int64(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TI64Column) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TI64Column) Equals(other *TI64Column) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src24 := other.Values[i] + if _tgt != _src24 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TI64Column) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TI64Column(%+v)", *p) +} + +func (p *TI64Column) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TDoubleColumn struct { + Values []float64 `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTDoubleColumn() *TDoubleColumn { + return &TDoubleColumn{} +} + + +func (p *TDoubleColumn) GetValues() []float64 { + return p.Values +} + +func (p *TDoubleColumn) GetNulls() []byte { + return p.Nulls +} +func (p *TDoubleColumn) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TDoubleColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]float64, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem25 float64 + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem25 = v +} + p.Values = append(p.Values, _elem25) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TDoubleColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TDoubleColumn) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDoubleColumn"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDoubleColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.DOUBLE, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteDouble(ctx, float64(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TDoubleColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TDoubleColumn) Equals(other *TDoubleColumn) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src26 := other.Values[i] + if _tgt != _src26 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TDoubleColumn) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TDoubleColumn(%+v)", *p) +} + +func (p *TDoubleColumn) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TStringColumn struct { + Values []string `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTStringColumn() *TStringColumn { + return &TStringColumn{} +} + + +func (p *TStringColumn) GetValues() []string { + return p.Values +} + +func (p *TStringColumn) GetNulls() []byte { + return p.Nulls +} +func (p *TStringColumn) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TStringColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem27 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem27 = v +} + p.Values = append(p.Values, _elem27) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TStringColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TStringColumn) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TStringColumn"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TStringColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TStringColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TStringColumn) Equals(other *TStringColumn) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src28 := other.Values[i] + if _tgt != _src28 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TStringColumn) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TStringColumn(%+v)", *p) +} + +func (p *TStringColumn) Validate() error { + return nil +} +// Attributes: +// - Values +// - Nulls +type TBinaryColumn struct { + Values [][]byte `thrift:"values,1,required" db:"values" json:"values"` + Nulls []byte `thrift:"nulls,2,required" db:"nulls" json:"nulls"` +} + +func NewTBinaryColumn() *TBinaryColumn { + return &TBinaryColumn{} +} + + +func (p *TBinaryColumn) GetValues() [][]byte { + return p.Values +} + +func (p *TBinaryColumn) GetNulls() []byte { + return p.Nulls +} +func (p *TBinaryColumn) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetValues bool = false; + var issetNulls bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetValues = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetNulls = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetValues{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Values is not set")); + } + if !issetNulls{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Nulls is not set")); + } + return nil +} + +func (p *TBinaryColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]byte, 0, size) + p.Values = tSlice + for i := 0; i < size; i ++ { +var _elem29 []byte + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem29 = v +} + p.Values = append(p.Values, _elem29) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TBinaryColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Nulls = v +} + return nil +} + +func (p *TBinaryColumn) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TBinaryColumn"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TBinaryColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "values", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:values: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteBinary(ctx, v); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:values: ", p), err) } + return err +} + +func (p *TBinaryColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "nulls", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:nulls: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Nulls); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.nulls (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:nulls: ", p), err) } + return err +} + +func (p *TBinaryColumn) Equals(other *TBinaryColumn) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Values) != len(other.Values) { return false } + for i, _tgt := range p.Values { + _src30 := other.Values[i] + if bytes.Compare(_tgt, _src30) != 0 { return false } + } + if bytes.Compare(p.Nulls, other.Nulls) != 0 { return false } + return true +} + +func (p *TBinaryColumn) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TBinaryColumn(%+v)", *p) +} + +func (p *TBinaryColumn) Validate() error { + return nil +} +// Attributes: +// - BoolVal +// - ByteVal +// - I16Val +// - I32Val +// - I64Val +// - DoubleVal +// - StringVal +// - BinaryVal +type TColumn struct { + BoolVal *TBoolColumn `thrift:"boolVal,1" db:"boolVal" json:"boolVal,omitempty"` + ByteVal *TByteColumn `thrift:"byteVal,2" db:"byteVal" json:"byteVal,omitempty"` + I16Val *TI16Column `thrift:"i16Val,3" db:"i16Val" json:"i16Val,omitempty"` + I32Val *TI32Column `thrift:"i32Val,4" db:"i32Val" json:"i32Val,omitempty"` + I64Val *TI64Column `thrift:"i64Val,5" db:"i64Val" json:"i64Val,omitempty"` + DoubleVal *TDoubleColumn `thrift:"doubleVal,6" db:"doubleVal" json:"doubleVal,omitempty"` + StringVal *TStringColumn `thrift:"stringVal,7" db:"stringVal" json:"stringVal,omitempty"` + BinaryVal *TBinaryColumn `thrift:"binaryVal,8" db:"binaryVal" json:"binaryVal,omitempty"` +} + +func NewTColumn() *TColumn { + return &TColumn{} +} + +var TColumn_BoolVal_DEFAULT *TBoolColumn +func (p *TColumn) GetBoolVal() *TBoolColumn { + if !p.IsSetBoolVal() { + return TColumn_BoolVal_DEFAULT + } +return p.BoolVal +} +var TColumn_ByteVal_DEFAULT *TByteColumn +func (p *TColumn) GetByteVal() *TByteColumn { + if !p.IsSetByteVal() { + return TColumn_ByteVal_DEFAULT + } +return p.ByteVal +} +var TColumn_I16Val_DEFAULT *TI16Column +func (p *TColumn) GetI16Val() *TI16Column { + if !p.IsSetI16Val() { + return TColumn_I16Val_DEFAULT + } +return p.I16Val +} +var TColumn_I32Val_DEFAULT *TI32Column +func (p *TColumn) GetI32Val() *TI32Column { + if !p.IsSetI32Val() { + return TColumn_I32Val_DEFAULT + } +return p.I32Val +} +var TColumn_I64Val_DEFAULT *TI64Column +func (p *TColumn) GetI64Val() *TI64Column { + if !p.IsSetI64Val() { + return TColumn_I64Val_DEFAULT + } +return p.I64Val +} +var TColumn_DoubleVal_DEFAULT *TDoubleColumn +func (p *TColumn) GetDoubleVal() *TDoubleColumn { + if !p.IsSetDoubleVal() { + return TColumn_DoubleVal_DEFAULT + } +return p.DoubleVal +} +var TColumn_StringVal_DEFAULT *TStringColumn +func (p *TColumn) GetStringVal() *TStringColumn { + if !p.IsSetStringVal() { + return 
TColumn_StringVal_DEFAULT + } +return p.StringVal +} +var TColumn_BinaryVal_DEFAULT *TBinaryColumn +func (p *TColumn) GetBinaryVal() *TBinaryColumn { + if !p.IsSetBinaryVal() { + return TColumn_BinaryVal_DEFAULT + } +return p.BinaryVal +} +func (p *TColumn) CountSetFieldsTColumn() int { + count := 0 + if (p.IsSetBoolVal()) { + count++ + } + if (p.IsSetByteVal()) { + count++ + } + if (p.IsSetI16Val()) { + count++ + } + if (p.IsSetI32Val()) { + count++ + } + if (p.IsSetI64Val()) { + count++ + } + if (p.IsSetDoubleVal()) { + count++ + } + if (p.IsSetStringVal()) { + count++ + } + if (p.IsSetBinaryVal()) { + count++ + } + return count + +} + +func (p *TColumn) IsSetBoolVal() bool { + return p.BoolVal != nil +} + +func (p *TColumn) IsSetByteVal() bool { + return p.ByteVal != nil +} + +func (p *TColumn) IsSetI16Val() bool { + return p.I16Val != nil +} + +func (p *TColumn) IsSetI32Val() bool { + return p.I32Val != nil +} + +func (p *TColumn) IsSetI64Val() bool { + return p.I64Val != nil +} + +func (p *TColumn) IsSetDoubleVal() bool { + return p.DoubleVal != nil +} + +func (p *TColumn) IsSetStringVal() bool { + return p.StringVal != nil +} + +func (p *TColumn) IsSetBinaryVal() bool { + return p.BinaryVal != nil +} + +func (p *TColumn) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end 
error: ", p), err) + } + return nil +} + +func (p *TColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.BoolVal = &TBoolColumn{} + if err := p.BoolVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BoolVal), err) + } + return nil +} + +func (p *TColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ByteVal = &TByteColumn{} + if err := p.ByteVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ByteVal), err) + } + return nil +} + +func (p *TColumn) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.I16Val = &TI16Column{} + if err := p.I16Val.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I16Val), err) + } + return nil +} + +func (p *TColumn) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.I32Val = &TI32Column{} + if err := p.I32Val.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I32Val), err) + } + return nil +} + +func (p *TColumn) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.I64Val = &TI64Column{} + if err := p.I64Val.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.I64Val), err) + } + return nil +} + +func (p *TColumn) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + p.DoubleVal = &TDoubleColumn{} + if err := p.DoubleVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DoubleVal), err) + } + return nil +} + +func (p *TColumn) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + p.StringVal = &TStringColumn{} + if err := p.StringVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StringVal), err) + } + return nil +} + +func (p *TColumn) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + p.BinaryVal = &TBinaryColumn{} + if err := p.BinaryVal.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BinaryVal), err) + } + return nil +} + +func (p *TColumn) Write(ctx context.Context, oprot thrift.TProtocol) error { + if c := p.CountSetFieldsTColumn(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) + } + if err := oprot.WriteStructBegin(ctx, "TColumn"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetBoolVal() { + if err := oprot.WriteFieldBegin(ctx, 
"boolVal", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:boolVal: ", p), err) } + if err := p.BoolVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BoolVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:boolVal: ", p), err) } + } + return err +} + +func (p *TColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetByteVal() { + if err := oprot.WriteFieldBegin(ctx, "byteVal", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:byteVal: ", p), err) } + if err := p.ByteVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ByteVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:byteVal: ", p), err) } + } + return err +} + +func (p *TColumn) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI16Val() { + if err := oprot.WriteFieldBegin(ctx, "i16Val", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:i16Val: ", p), err) } + if err := p.I16Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I16Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:i16Val: ", p), err) } + } + return err +} + +func (p *TColumn) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI32Val() { + if err := oprot.WriteFieldBegin(ctx, "i32Val", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:i32Val: ", p), err) } + if err := p.I32Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I32Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:i32Val: ", p), err) } + } + return err +} + +func (p *TColumn) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetI64Val() { + if err := oprot.WriteFieldBegin(ctx, "i64Val", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:i64Val: ", p), err) } + if err := p.I64Val.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.I64Val), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:i64Val: ", p), err) } + } + return err +} + +func (p *TColumn) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDoubleVal() { + if err := oprot.WriteFieldBegin(ctx, "doubleVal", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:doubleVal: ", p), err) } + if err := p.DoubleVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DoubleVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:doubleVal: ", p), err) } + } + return err +} + +func (p *TColumn) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStringVal() 
{ + if err := oprot.WriteFieldBegin(ctx, "stringVal", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:stringVal: ", p), err) } + if err := p.StringVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StringVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:stringVal: ", p), err) } + } + return err +} + +func (p *TColumn) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetBinaryVal() { + if err := oprot.WriteFieldBegin(ctx, "binaryVal", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binaryVal: ", p), err) } + if err := p.BinaryVal.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BinaryVal), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binaryVal: ", p), err) } + } + return err +} + +func (p *TColumn) Equals(other *TColumn) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.BoolVal.Equals(other.BoolVal) { return false } + if !p.ByteVal.Equals(other.ByteVal) { return false } + if !p.I16Val.Equals(other.I16Val) { return false } + if !p.I32Val.Equals(other.I32Val) { return false } + if !p.I64Val.Equals(other.I64Val) { return false } + if !p.DoubleVal.Equals(other.DoubleVal) { return false } + if !p.StringVal.Equals(other.StringVal) { return false } + if !p.BinaryVal.Equals(other.BinaryVal) { return false } + return true +} + +func (p *TColumn) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TColumn(%+v)", *p) +} + +func (p *TColumn) Validate() error { + return nil +} +// Attributes: +// - CompressionCodec +type TDBSqlJsonArrayFormat struct { + CompressionCodec *TDBSqlCompressionCodec `thrift:"compressionCodec,1" db:"compressionCodec" json:"compressionCodec,omitempty"` +} + +func NewTDBSqlJsonArrayFormat() *TDBSqlJsonArrayFormat { + return &TDBSqlJsonArrayFormat{} +} + +var TDBSqlJsonArrayFormat_CompressionCodec_DEFAULT TDBSqlCompressionCodec +func (p *TDBSqlJsonArrayFormat) GetCompressionCodec() TDBSqlCompressionCodec { + if !p.IsSetCompressionCodec() { + return TDBSqlJsonArrayFormat_CompressionCodec_DEFAULT + } +return *p.CompressionCodec +} +func (p *TDBSqlJsonArrayFormat) IsSetCompressionCodec() bool { + return p.CompressionCodec != nil +} + +func (p *TDBSqlJsonArrayFormat) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p 
*TDBSqlJsonArrayFormat) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TDBSqlCompressionCodec(v) + p.CompressionCodec = &temp +} + return nil +} + +func (p *TDBSqlJsonArrayFormat) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDBSqlJsonArrayFormat"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDBSqlJsonArrayFormat) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCompressionCodec() { + if err := oprot.WriteFieldBegin(ctx, "compressionCodec", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:compressionCodec: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.CompressionCodec)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.compressionCodec (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:compressionCodec: ", p), err) } + } + return err +} + +func (p *TDBSqlJsonArrayFormat) Equals(other *TDBSqlJsonArrayFormat) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.CompressionCodec != other.CompressionCodec { + if p.CompressionCodec == nil || other.CompressionCodec == nil { + return false + } + if (*p.CompressionCodec) != (*other.CompressionCodec) { return false } + } + return true +} + +func (p *TDBSqlJsonArrayFormat) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TDBSqlJsonArrayFormat(%+v)", *p) +} + +func (p *TDBSqlJsonArrayFormat) Validate() error { + return nil +} +// Attributes: +// - CompressionCodec +type TDBSqlCsvFormat struct { + CompressionCodec *TDBSqlCompressionCodec `thrift:"compressionCodec,1" db:"compressionCodec" json:"compressionCodec,omitempty"` +} + +func NewTDBSqlCsvFormat() *TDBSqlCsvFormat { + return &TDBSqlCsvFormat{} +} + +var TDBSqlCsvFormat_CompressionCodec_DEFAULT TDBSqlCompressionCodec +func (p *TDBSqlCsvFormat) GetCompressionCodec() TDBSqlCompressionCodec { + if !p.IsSetCompressionCodec() { + return TDBSqlCsvFormat_CompressionCodec_DEFAULT + } +return *p.CompressionCodec +} +func (p *TDBSqlCsvFormat) IsSetCompressionCodec() bool { + return p.CompressionCodec != nil +} + +func (p *TDBSqlCsvFormat) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + 
+// Attributes:
+// - CompressionCodec
+type TDBSqlCsvFormat struct {
+  CompressionCodec *TDBSqlCompressionCodec `thrift:"compressionCodec,1" db:"compressionCodec" json:"compressionCodec,omitempty"`
+}
+
+func NewTDBSqlCsvFormat() *TDBSqlCsvFormat {
+  return &TDBSqlCsvFormat{}
+}
+
+var TDBSqlCsvFormat_CompressionCodec_DEFAULT TDBSqlCompressionCodec
+func (p *TDBSqlCsvFormat) GetCompressionCodec() TDBSqlCompressionCodec {
+  if !p.IsSetCompressionCodec() {
+    return TDBSqlCsvFormat_CompressionCodec_DEFAULT
+  }
+return *p.CompressionCodec
+}
+func (p *TDBSqlCsvFormat) IsSetCompressionCodec() bool {
+  return p.CompressionCodec != nil
+}
+
+func (p *TDBSqlCsvFormat) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlCsvFormat) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := TDBSqlCompressionCodec(v)
+  p.CompressionCodec = &temp
+}
+  return nil
+}
+
+func (p *TDBSqlCsvFormat) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TDBSqlCsvFormat"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDBSqlCsvFormat) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCompressionCodec() {
+    if err := oprot.WriteFieldBegin(ctx, "compressionCodec", thrift.I32, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:compressionCodec: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.CompressionCodec)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.compressionCodec (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:compressionCodec: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCsvFormat) Equals(other *TDBSqlCsvFormat) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.CompressionCodec != other.CompressionCodec {
+    if p.CompressionCodec == nil || other.CompressionCodec == nil {
+      return false
+    }
+    if (*p.CompressionCodec) != (*other.CompressionCodec) { return false }
+  }
+  return true
+}
+
+func (p *TDBSqlCsvFormat) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDBSqlCsvFormat(%+v)", *p)
+}
+
+func (p *TDBSqlCsvFormat) Validate() error {
+  return nil
+}
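+// TDBSqlArrowFormat describes Arrow-encoded result data; both arrowLayout
+// (field 1) and compressionCodec (field 2) are optional i32 enum values.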
+// Attributes:
+// - ArrowLayout
+// - CompressionCodec
+type TDBSqlArrowFormat struct {
+  ArrowLayout *TDBSqlArrowLayout `thrift:"arrowLayout,1" db:"arrowLayout" json:"arrowLayout,omitempty"`
+  CompressionCodec *TDBSqlCompressionCodec `thrift:"compressionCodec,2" db:"compressionCodec" json:"compressionCodec,omitempty"`
+}
+
+func NewTDBSqlArrowFormat() *TDBSqlArrowFormat {
+  return &TDBSqlArrowFormat{}
+}
+
+var TDBSqlArrowFormat_ArrowLayout_DEFAULT TDBSqlArrowLayout
+func (p *TDBSqlArrowFormat) GetArrowLayout() TDBSqlArrowLayout {
+  if !p.IsSetArrowLayout() {
+    return TDBSqlArrowFormat_ArrowLayout_DEFAULT
+  }
+return *p.ArrowLayout
+}
+var TDBSqlArrowFormat_CompressionCodec_DEFAULT TDBSqlCompressionCodec
+func (p *TDBSqlArrowFormat) GetCompressionCodec() TDBSqlCompressionCodec {
+  if !p.IsSetCompressionCodec() {
+    return TDBSqlArrowFormat_CompressionCodec_DEFAULT
+  }
+return *p.CompressionCodec
+}
+func (p *TDBSqlArrowFormat) IsSetArrowLayout() bool {
+  return p.ArrowLayout != nil
+}
+
+func (p *TDBSqlArrowFormat) IsSetCompressionCodec() bool {
+  return p.CompressionCodec != nil
+}
+
+func (p *TDBSqlArrowFormat) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlArrowFormat) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := TDBSqlArrowLayout(v)
+  p.ArrowLayout = &temp
+}
+  return nil
+}
+
+func (p *TDBSqlArrowFormat) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  temp := TDBSqlCompressionCodec(v)
+  p.CompressionCodec = &temp
+}
+  return nil
+}
+
+func (p *TDBSqlArrowFormat) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TDBSqlArrowFormat"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDBSqlArrowFormat) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetArrowLayout() {
+    if err := oprot.WriteFieldBegin(ctx, "arrowLayout", thrift.I32, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:arrowLayout: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.ArrowLayout)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.arrowLayout (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:arrowLayout: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlArrowFormat) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCompressionCodec() {
+    if err := oprot.WriteFieldBegin(ctx, "compressionCodec", thrift.I32, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:compressionCodec: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.CompressionCodec)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.compressionCodec (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:compressionCodec: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlArrowFormat) Equals(other *TDBSqlArrowFormat) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.ArrowLayout != other.ArrowLayout {
+    if p.ArrowLayout == nil || other.ArrowLayout == nil {
+      return false
+    }
+    if (*p.ArrowLayout) != (*other.ArrowLayout) { return false }
+  }
+  if p.CompressionCodec != other.CompressionCodec {
+    if p.CompressionCodec == nil || other.CompressionCodec == nil {
+      return false
+    }
+    if (*p.CompressionCodec) != (*other.CompressionCodec) { return false }
+  }
+  return true
+}
+
+func (p *TDBSqlArrowFormat) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDBSqlArrowFormat(%+v)", *p)
+}
+
+func (p *TDBSqlArrowFormat) Validate() error {
+  return nil
+}
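+// TDBSqlResultFormat is a Thrift union over the three wire formats above;
+// its Write method rejects any value for which
+// CountSetFieldsTDBSqlResultFormat() != 1, so exactly one of arrowFormat,
+// csvFormat or jsonArrayFormat must be set.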
+// Attributes:
+// - ArrowFormat
+// - CsvFormat
+// - JsonArrayFormat
+type TDBSqlResultFormat struct {
+  ArrowFormat *TDBSqlArrowFormat `thrift:"arrowFormat,1" db:"arrowFormat" json:"arrowFormat,omitempty"`
+  CsvFormat *TDBSqlCsvFormat `thrift:"csvFormat,2" db:"csvFormat" json:"csvFormat,omitempty"`
+  JsonArrayFormat *TDBSqlJsonArrayFormat `thrift:"jsonArrayFormat,3" db:"jsonArrayFormat" json:"jsonArrayFormat,omitempty"`
+}
+
+func NewTDBSqlResultFormat() *TDBSqlResultFormat {
+  return &TDBSqlResultFormat{}
+}
+
+var TDBSqlResultFormat_ArrowFormat_DEFAULT *TDBSqlArrowFormat
+func (p *TDBSqlResultFormat) GetArrowFormat() *TDBSqlArrowFormat {
+  if !p.IsSetArrowFormat() {
+    return TDBSqlResultFormat_ArrowFormat_DEFAULT
+  }
+return p.ArrowFormat
+}
+var TDBSqlResultFormat_CsvFormat_DEFAULT *TDBSqlCsvFormat
+func (p *TDBSqlResultFormat) GetCsvFormat() *TDBSqlCsvFormat {
+  if !p.IsSetCsvFormat() {
+    return TDBSqlResultFormat_CsvFormat_DEFAULT
+  }
+return p.CsvFormat
+}
+var TDBSqlResultFormat_JsonArrayFormat_DEFAULT *TDBSqlJsonArrayFormat
+func (p *TDBSqlResultFormat) GetJsonArrayFormat() *TDBSqlJsonArrayFormat {
+  if !p.IsSetJsonArrayFormat() {
+    return TDBSqlResultFormat_JsonArrayFormat_DEFAULT
+  }
+return p.JsonArrayFormat
+}
+func (p *TDBSqlResultFormat) CountSetFieldsTDBSqlResultFormat() int {
+  count := 0
+  if (p.IsSetArrowFormat()) {
+    count++
+  }
+  if (p.IsSetCsvFormat()) {
+    count++
+  }
+  if (p.IsSetJsonArrayFormat()) {
+    count++
+  }
+  return count
+
+}
+
+func (p *TDBSqlResultFormat) IsSetArrowFormat() bool {
+  return p.ArrowFormat != nil
+}
+
+func (p *TDBSqlResultFormat) IsSetCsvFormat() bool {
+  return p.CsvFormat != nil
+}
+
+func (p *TDBSqlResultFormat) IsSetJsonArrayFormat() bool {
+  return p.JsonArrayFormat != nil
+}
+
+func (p *TDBSqlResultFormat) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlResultFormat) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ArrowFormat = &TDBSqlArrowFormat{}
+  if err := p.ArrowFormat.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ArrowFormat), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlResultFormat) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.CsvFormat = &TDBSqlCsvFormat{}
+  if err := p.CsvFormat.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CsvFormat), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlResultFormat) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  p.JsonArrayFormat = &TDBSqlJsonArrayFormat{}
+  if err := p.JsonArrayFormat.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.JsonArrayFormat), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlResultFormat) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if c := p.CountSetFieldsTDBSqlResultFormat(); c != 1 {
+    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
+  }
+  if err := oprot.WriteStructBegin(ctx, "TDBSqlResultFormat"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDBSqlResultFormat) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetArrowFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "arrowFormat", thrift.STRUCT, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:arrowFormat: ", p), err) }
+    if err := p.ArrowFormat.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ArrowFormat), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:arrowFormat: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlResultFormat) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCsvFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "csvFormat", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:csvFormat: ", p), err) }
+    if err := p.CsvFormat.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CsvFormat), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:csvFormat: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlResultFormat) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetJsonArrayFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "jsonArrayFormat", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:jsonArrayFormat: ", p), err) }
+    if err := p.JsonArrayFormat.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.JsonArrayFormat), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:jsonArrayFormat: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlResultFormat) Equals(other *TDBSqlResultFormat) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.ArrowFormat.Equals(other.ArrowFormat) { return false }
+  if !p.CsvFormat.Equals(other.CsvFormat) { return false }
+  if !p.JsonArrayFormat.Equals(other.JsonArrayFormat) { return false }
+  return true
+}
+
+func (p *TDBSqlResultFormat) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDBSqlResultFormat(%+v)", *p)
+}
+
+func (p *TDBSqlResultFormat) Validate() error {
+  return nil
+}
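+// TSparkArrowBatch carries one serialized Arrow record batch inline in a row
+// set, together with the number of rows it encodes; both fields are required.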
thrift.PrependError("error reading field 2: ", err) +} else { + p.RowCount = v +} + return nil +} + +func (p *TSparkArrowBatch) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TSparkArrowBatch"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TSparkArrowBatch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Batch); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.batch (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) } + return err +} + +func (p *TSparkArrowBatch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "rowCount", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:rowCount: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.RowCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.rowCount (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:rowCount: ", p), err) } + return err +} + +func (p *TSparkArrowBatch) Equals(other *TSparkArrowBatch) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if bytes.Compare(p.Batch, other.Batch) != 0 { return false } + if p.RowCount != other.RowCount { return false } + return true +} + +func (p *TSparkArrowBatch) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSparkArrowBatch(%+v)", *p) +} + +func (p *TSparkArrowBatch) Validate() error { + return nil +} +// Attributes: +// - FileLink +// - ExpiryTime +// - StartRowOffset +// - RowCount +// - BytesNum +// - HttpHeaders +type TSparkArrowResultLink struct { + FileLink string `thrift:"fileLink,1,required" db:"fileLink" json:"fileLink"` + ExpiryTime int64 `thrift:"expiryTime,2,required" db:"expiryTime" json:"expiryTime"` + StartRowOffset int64 `thrift:"startRowOffset,3,required" db:"startRowOffset" json:"startRowOffset"` + RowCount int64 `thrift:"rowCount,4,required" db:"rowCount" json:"rowCount"` + BytesNum int64 `thrift:"bytesNum,5,required" db:"bytesNum" json:"bytesNum"` + HttpHeaders map[string]string `thrift:"httpHeaders,6" db:"httpHeaders" json:"httpHeaders,omitempty"` +} + +func NewTSparkArrowResultLink() *TSparkArrowResultLink { + return &TSparkArrowResultLink{} +} + + +func (p *TSparkArrowResultLink) GetFileLink() string { + return p.FileLink +} + +func (p *TSparkArrowResultLink) GetExpiryTime() int64 { + return p.ExpiryTime +} + +func (p *TSparkArrowResultLink) GetStartRowOffset() int64 { + return p.StartRowOffset +} + +func (p *TSparkArrowResultLink) GetRowCount() int64 { + return p.RowCount +} + +func (p *TSparkArrowResultLink) 
+// Attributes:
+// - FileLink
+// - ExpiryTime
+// - StartRowOffset
+// - RowCount
+// - BytesNum
+// - HttpHeaders
+type TSparkArrowResultLink struct {
+  FileLink string `thrift:"fileLink,1,required" db:"fileLink" json:"fileLink"`
+  ExpiryTime int64 `thrift:"expiryTime,2,required" db:"expiryTime" json:"expiryTime"`
+  StartRowOffset int64 `thrift:"startRowOffset,3,required" db:"startRowOffset" json:"startRowOffset"`
+  RowCount int64 `thrift:"rowCount,4,required" db:"rowCount" json:"rowCount"`
+  BytesNum int64 `thrift:"bytesNum,5,required" db:"bytesNum" json:"bytesNum"`
+  HttpHeaders map[string]string `thrift:"httpHeaders,6" db:"httpHeaders" json:"httpHeaders,omitempty"`
+}
+
+func NewTSparkArrowResultLink() *TSparkArrowResultLink {
+  return &TSparkArrowResultLink{}
+}
+
+
+func (p *TSparkArrowResultLink) GetFileLink() string {
+  return p.FileLink
+}
+
+func (p *TSparkArrowResultLink) GetExpiryTime() int64 {
+  return p.ExpiryTime
+}
+
+func (p *TSparkArrowResultLink) GetStartRowOffset() int64 {
+  return p.StartRowOffset
+}
+
+func (p *TSparkArrowResultLink) GetRowCount() int64 {
+  return p.RowCount
+}
+
+func (p *TSparkArrowResultLink) GetBytesNum() int64 {
+  return p.BytesNum
+}
+var TSparkArrowResultLink_HttpHeaders_DEFAULT map[string]string
+
+func (p *TSparkArrowResultLink) GetHttpHeaders() map[string]string {
+  return p.HttpHeaders
+}
+func (p *TSparkArrowResultLink) IsSetHttpHeaders() bool {
+  return p.HttpHeaders != nil
+}
+
+func (p *TSparkArrowResultLink) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetFileLink bool = false;
+  var issetExpiryTime bool = false;
+  var issetStartRowOffset bool = false;
+  var issetRowCount bool = false;
+  var issetBytesNum bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetFileLink = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetExpiryTime = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetStartRowOffset = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetRowCount = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+        issetBytesNum = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.MAP {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetFileLink{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FileLink is not set"));
+  }
+  if !issetExpiryTime{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ExpiryTime is not set"));
+  }
+  if !issetStartRowOffset{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartRowOffset is not set"));
+  }
+  if !issetRowCount{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RowCount is not set"));
+  }
+  if !issetBytesNum{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BytesNum is not set"));
+  }
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.FileLink = v
+}
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.ExpiryTime = v
+}
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.StartRowOffset = v
+}
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.RowCount = v
+}
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.BytesNum = v
+}
+  return nil
+}
+
+func (p *TSparkArrowResultLink) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  _, _, size, err := iprot.ReadMapBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading map begin: ", err)
+  }
+  tMap := make(map[string]string, size)
+  p.HttpHeaders = tMap
+  for i := 0; i < size; i ++ {
+var _key31 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _key31 = v
+}
+var _val32 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _val32 = v
+}
+    p.HttpHeaders[_key31] = _val32
+  }
+  if err := iprot.ReadMapEnd(ctx); err != nil {
+    return thrift.PrependError("error reading map end: ", err)
+  }
+  return nil
+}
+
+func (p *TSparkArrowResultLink) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TSparkArrowResultLink"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TSparkArrowResultLink) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "fileLink", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fileLink: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.FileLink)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.fileLink (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fileLink: ", p), err) }
+  return err
+}
+
+func (p *TSparkArrowResultLink) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "expiryTime", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:expiryTime: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.ExpiryTime)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.expiryTime (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:expiryTime: ", p), err) }
+  return err
+}
+
+func (p *TSparkArrowResultLink) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "startRowOffset", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:startRowOffset: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.StartRowOffset)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.startRowOffset (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:startRowOffset: ", p), err) }
+  return err
+}
+
+func (p *TSparkArrowResultLink) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "rowCount", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:rowCount: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.RowCount)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.rowCount (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:rowCount: ", p), err) }
+  return err
+}
+
+func (p *TSparkArrowResultLink) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "bytesNum", thrift.I64, 5); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:bytesNum: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.BytesNum)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.bytesNum (5) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 5:bytesNum: ", p), err) }
+  return err
+}
+
+func (p *TSparkArrowResultLink) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHttpHeaders() {
+    if err := oprot.WriteFieldBegin(ctx, "httpHeaders", thrift.MAP, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:httpHeaders: ", p), err) }
+    if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.HttpHeaders)); err != nil {
+      return thrift.PrependError("error writing map begin: ", err)
+    }
+    for k, v := range p.HttpHeaders {
+      if err := oprot.WriteString(ctx, string(k)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+      if err := oprot.WriteString(ctx, string(v)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+    }
+    if err := oprot.WriteMapEnd(ctx); err != nil {
+      return thrift.PrependError("error writing map end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:httpHeaders: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkArrowResultLink) Equals(other *TSparkArrowResultLink) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.FileLink != other.FileLink { return false }
+  if p.ExpiryTime != other.ExpiryTime { return false }
+  if p.StartRowOffset != other.StartRowOffset { return false }
+  if p.RowCount != other.RowCount { return false }
+  if p.BytesNum != other.BytesNum { return false }
+  if len(p.HttpHeaders) != len(other.HttpHeaders) { return false }
+  for k, _tgt := range p.HttpHeaders {
+    _src33 := other.HttpHeaders[k]
+    if _tgt != _src33 { return false }
+  }
+  return true
+}
+
+func (p *TSparkArrowResultLink) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TSparkArrowResultLink(%+v)", *p)
+}
+
+func (p *TSparkArrowResultLink) Validate() error {
+  return nil
+}
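+// TDBSqlCloudResultFile describes a single cloud-fetch result file; all of
+// its fields are optional, spanning file metadata (path, row range, byte
+// counts) and the download link with its expiry and HTTP headers.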
+// Attributes:
+// - FilePath
+// - StartRowOffset
+// - RowCount
+// - UncompressedBytes
+// - CompressedBytes
+// - FileLink
+// - LinkExpiryTime
+// - HttpHeaders
+type TDBSqlCloudResultFile struct {
+  FilePath *string `thrift:"filePath,1" db:"filePath" json:"filePath,omitempty"`
+  StartRowOffset *int64 `thrift:"startRowOffset,2" db:"startRowOffset" json:"startRowOffset,omitempty"`
+  RowCount *int64 `thrift:"rowCount,3" db:"rowCount" json:"rowCount,omitempty"`
+  UncompressedBytes *int64 `thrift:"uncompressedBytes,4" db:"uncompressedBytes" json:"uncompressedBytes,omitempty"`
+  CompressedBytes *int64 `thrift:"compressedBytes,5" db:"compressedBytes" json:"compressedBytes,omitempty"`
+  FileLink *string `thrift:"fileLink,6" db:"fileLink" json:"fileLink,omitempty"`
+  LinkExpiryTime *int64 `thrift:"linkExpiryTime,7" db:"linkExpiryTime" json:"linkExpiryTime,omitempty"`
+  HttpHeaders map[string]string `thrift:"httpHeaders,8" db:"httpHeaders" json:"httpHeaders,omitempty"`
+}
+
+func NewTDBSqlCloudResultFile() *TDBSqlCloudResultFile {
+  return &TDBSqlCloudResultFile{}
+}
+
+var TDBSqlCloudResultFile_FilePath_DEFAULT string
+func (p *TDBSqlCloudResultFile) GetFilePath() string {
+  if !p.IsSetFilePath() {
+    return TDBSqlCloudResultFile_FilePath_DEFAULT
+  }
+return *p.FilePath
+}
+var TDBSqlCloudResultFile_StartRowOffset_DEFAULT int64
+func (p *TDBSqlCloudResultFile) GetStartRowOffset() int64 {
+  if !p.IsSetStartRowOffset() {
+    return TDBSqlCloudResultFile_StartRowOffset_DEFAULT
+  }
+return *p.StartRowOffset
+}
+var TDBSqlCloudResultFile_RowCount_DEFAULT int64
+func (p *TDBSqlCloudResultFile) GetRowCount() int64 {
+  if !p.IsSetRowCount() {
+    return TDBSqlCloudResultFile_RowCount_DEFAULT
+  }
+return *p.RowCount
+}
+var TDBSqlCloudResultFile_UncompressedBytes_DEFAULT int64
+func (p *TDBSqlCloudResultFile) GetUncompressedBytes() int64 {
+  if !p.IsSetUncompressedBytes() {
+    return TDBSqlCloudResultFile_UncompressedBytes_DEFAULT
+  }
+return *p.UncompressedBytes
+}
+var TDBSqlCloudResultFile_CompressedBytes_DEFAULT int64
+func (p *TDBSqlCloudResultFile) GetCompressedBytes() int64 {
+  if !p.IsSetCompressedBytes() {
+    return TDBSqlCloudResultFile_CompressedBytes_DEFAULT
+  }
+return *p.CompressedBytes
+}
+var TDBSqlCloudResultFile_FileLink_DEFAULT string
+func (p *TDBSqlCloudResultFile) GetFileLink() string {
+  if !p.IsSetFileLink() {
+    return TDBSqlCloudResultFile_FileLink_DEFAULT
+  }
+return *p.FileLink
+}
+var TDBSqlCloudResultFile_LinkExpiryTime_DEFAULT int64
+func (p *TDBSqlCloudResultFile) GetLinkExpiryTime() int64 {
+  if !p.IsSetLinkExpiryTime() {
+    return TDBSqlCloudResultFile_LinkExpiryTime_DEFAULT
+  }
+return *p.LinkExpiryTime
+}
+var TDBSqlCloudResultFile_HttpHeaders_DEFAULT map[string]string
+
+func (p *TDBSqlCloudResultFile) GetHttpHeaders() map[string]string {
+  return p.HttpHeaders
+}
+func (p *TDBSqlCloudResultFile) IsSetFilePath() bool {
+  return p.FilePath != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetStartRowOffset() bool {
+  return p.StartRowOffset != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetRowCount() bool {
+  return p.RowCount != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetUncompressedBytes() bool {
+  return p.UncompressedBytes != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetCompressedBytes() bool {
+  return p.CompressedBytes != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetFileLink() bool {
+  return p.FileLink != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetLinkExpiryTime() bool {
+  return p.LinkExpiryTime != nil
+}
+
+func (p *TDBSqlCloudResultFile) IsSetHttpHeaders() bool {
+  return p.HttpHeaders != nil
+}
+
+func (p *TDBSqlCloudResultFile) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 7:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField7(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.MAP {
+        if err := p.ReadField8(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.FilePath = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.StartRowOffset = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.RowCount = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.UncompressedBytes = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.CompressedBytes = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 6: ", err)
+} else {
+  p.FileLink = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 7: ", err)
+} else {
+  p.LinkExpiryTime = &v
+}
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  _, _, size, err := iprot.ReadMapBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading map begin: ", err)
+  }
+  tMap := make(map[string]string, size)
+  p.HttpHeaders = tMap
+  for i := 0; i < size; i ++ {
+var _key34 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _key34 = v
+}
+var _val35 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+      return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _val35 = v
+}
+    p.HttpHeaders[_key34] = _val35
+  }
+  if err := iprot.ReadMapEnd(ctx); err != nil {
+    return thrift.PrependError("error reading map end: ", err)
+  }
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TDBSqlCloudResultFile"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField7(ctx, oprot); err != nil { return err }
+    if err := p.writeField8(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDBSqlCloudResultFile) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetFilePath() {
+    if err := oprot.WriteFieldBegin(ctx, "filePath", thrift.STRING, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:filePath: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.FilePath)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.filePath (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:filePath: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStartRowOffset() {
+    if err := oprot.WriteFieldBegin(ctx, "startRowOffset", thrift.I64, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:startRowOffset: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.StartRowOffset)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.startRowOffset (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:startRowOffset: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRowCount() {
+    if err := oprot.WriteFieldBegin(ctx, "rowCount", thrift.I64, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rowCount: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.RowCount)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.rowCount (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rowCount: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetUncompressedBytes() {
+    if err := oprot.WriteFieldBegin(ctx, "uncompressedBytes", thrift.I64, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:uncompressedBytes: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.UncompressedBytes)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.uncompressedBytes (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:uncompressedBytes: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCompressedBytes() {
+    if err := oprot.WriteFieldBegin(ctx, "compressedBytes", thrift.I64, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:compressedBytes: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.CompressedBytes)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.compressedBytes (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:compressedBytes: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetFileLink() {
+    if err := oprot.WriteFieldBegin(ctx, "fileLink", thrift.STRING, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:fileLink: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.FileLink)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.fileLink (6) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:fileLink: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetLinkExpiryTime() {
+    if err := oprot.WriteFieldBegin(ctx, "linkExpiryTime", thrift.I64, 7); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:linkExpiryTime: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.LinkExpiryTime)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.linkExpiryTime (7) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 7:linkExpiryTime: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHttpHeaders() {
+    if err := oprot.WriteFieldBegin(ctx, "httpHeaders", thrift.MAP, 8); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:httpHeaders: ", p), err) }
+    if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.HttpHeaders)); err != nil {
+      return thrift.PrependError("error writing map begin: ", err)
+    }
+    for k, v := range p.HttpHeaders {
+      if err := oprot.WriteString(ctx, string(k)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+      if err := oprot.WriteString(ctx, string(v)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+    }
+    if err := oprot.WriteMapEnd(ctx); err != nil {
+      return thrift.PrependError("error writing map end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 8:httpHeaders: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlCloudResultFile) Equals(other *TDBSqlCloudResultFile) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.FilePath != other.FilePath {
+    if p.FilePath == nil || other.FilePath == nil {
+      return false
+    }
+    if (*p.FilePath) != (*other.FilePath) { return false }
+  }
+  if p.StartRowOffset != other.StartRowOffset {
+    if p.StartRowOffset == nil || other.StartRowOffset == nil {
+      return false
+    }
+    if (*p.StartRowOffset) != (*other.StartRowOffset) { return false }
+  }
+  if p.RowCount != other.RowCount {
+    if p.RowCount == nil || other.RowCount == nil {
+      return false
+    }
+    if (*p.RowCount) != (*other.RowCount) { return false }
+  }
+  if p.UncompressedBytes != other.UncompressedBytes {
+    if p.UncompressedBytes == nil || other.UncompressedBytes == nil {
+      return false
+    }
+    if (*p.UncompressedBytes) != (*other.UncompressedBytes) { return false }
+  }
+  if p.CompressedBytes != other.CompressedBytes {
+    if p.CompressedBytes == nil || other.CompressedBytes == nil {
+      return false
+    }
+    if (*p.CompressedBytes) != (*other.CompressedBytes) { return false }
+  }
+  if p.FileLink != other.FileLink {
+    if p.FileLink == nil || other.FileLink == nil {
+      return false
+    }
+    if (*p.FileLink) != (*other.FileLink) { return false }
+  }
+  if p.LinkExpiryTime != other.LinkExpiryTime {
+    if p.LinkExpiryTime == nil || other.LinkExpiryTime == nil {
+      return false
+    }
+    if (*p.LinkExpiryTime) != (*other.LinkExpiryTime) { return false }
+  }
+  if len(p.HttpHeaders) != len(other.HttpHeaders) { return false }
+  for k, _tgt := range p.HttpHeaders {
+    _src36 := other.HttpHeaders[k]
+    if _tgt != _src36 { return false }
+  }
+  return true
+}
+
+func (p *TDBSqlCloudResultFile) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDBSqlCloudResultFile(%+v)", *p)
+}
+
+func (p *TDBSqlCloudResultFile) Validate() error {
+  return nil
+}
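+// TRowSet is the top-level result container: required startRowOffset and
+// rows, optional column-oriented columns/binaryColumns/columnCount, and the
+// high-numbered Databricks extensions for inline Arrow batches (field 1281),
+// external result links (1282) and cloud-fetch files (3329).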
+// Attributes:
+// - StartRowOffset
+// - Rows
+// - Columns
+// - BinaryColumns
+// - ColumnCount
+// - ArrowBatches
+// - ResultLinks
+// - CloudFetchResults
+type TRowSet struct {
+  StartRowOffset int64 `thrift:"startRowOffset,1,required" db:"startRowOffset" json:"startRowOffset"`
+  Rows []*TRow `thrift:"rows,2,required" db:"rows" json:"rows"`
+  Columns []*TColumn `thrift:"columns,3" db:"columns" json:"columns,omitempty"`
+  BinaryColumns []byte `thrift:"binaryColumns,4" db:"binaryColumns" json:"binaryColumns,omitempty"`
+  ColumnCount *int32 `thrift:"columnCount,5" db:"columnCount" json:"columnCount,omitempty"`
+  // unused fields # 6 to 1280
+  ArrowBatches []*TSparkArrowBatch `thrift:"arrowBatches,1281" db:"arrowBatches" json:"arrowBatches,omitempty"`
+  ResultLinks []*TSparkArrowResultLink `thrift:"resultLinks,1282" db:"resultLinks" json:"resultLinks,omitempty"`
+  // unused fields # 1283 to 3328
+  CloudFetchResults []*TDBSqlCloudResultFile `thrift:"cloudFetchResults,3329" db:"cloudFetchResults" json:"cloudFetchResults,omitempty"`
+}
+
+func NewTRowSet() *TRowSet {
+  return &TRowSet{}
+}
+
+
+func (p *TRowSet) GetStartRowOffset() int64 {
+  return p.StartRowOffset
+}
+
+func (p *TRowSet) GetRows() []*TRow {
+  return p.Rows
+}
+var TRowSet_Columns_DEFAULT []*TColumn
+
+func (p *TRowSet) GetColumns() []*TColumn {
+  return p.Columns
+}
+var TRowSet_BinaryColumns_DEFAULT []byte
+
+func (p *TRowSet) GetBinaryColumns() []byte {
+  return p.BinaryColumns
+}
+var TRowSet_ColumnCount_DEFAULT int32
+func (p *TRowSet) GetColumnCount() int32 {
+  if !p.IsSetColumnCount() {
+    return TRowSet_ColumnCount_DEFAULT
+  }
+return *p.ColumnCount
+}
+var TRowSet_ArrowBatches_DEFAULT []*TSparkArrowBatch
+
+func (p *TRowSet) GetArrowBatches() []*TSparkArrowBatch {
+  return p.ArrowBatches
+}
+var TRowSet_ResultLinks_DEFAULT []*TSparkArrowResultLink
+
+func (p *TRowSet) GetResultLinks() []*TSparkArrowResultLink {
+  return p.ResultLinks
+}
+var TRowSet_CloudFetchResults_DEFAULT []*TDBSqlCloudResultFile
+
+func (p *TRowSet) GetCloudFetchResults() []*TDBSqlCloudResultFile {
+  return p.CloudFetchResults
+}
+func (p *TRowSet) IsSetColumns() bool {
+  return p.Columns != nil
+}
+
+func (p *TRowSet) IsSetBinaryColumns() bool {
+  return p.BinaryColumns != nil
+}
+
+func (p *TRowSet) IsSetColumnCount() bool {
+  return p.ColumnCount != nil
+}
+
+func (p *TRowSet) IsSetArrowBatches() bool {
+  return p.ArrowBatches != nil
+}
+
+func (p *TRowSet) IsSetResultLinks() bool {
+  return p.ResultLinks != nil
+}
+
+func (p *TRowSet) IsSetCloudFetchResults() bool {
+  return p.CloudFetchResults != nil
+}
+
+func (p *TRowSet) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStartRowOffset bool = false;
+  var issetRows bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStartRowOffset = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetRows = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1282:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1282(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStartRowOffset{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartRowOffset is not set"));
+  }
+  if !issetRows{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Rows is not set"));
+  }
+  return nil
+}
+
+func (p *TRowSet) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.StartRowOffset = v
+}
+  return nil
+}
+
+func (p *TRowSet) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TRow, 0, size)
+  p.Rows = tSlice
+  for i := 0; i < size; i ++ {
+    _elem37 := &TRow{}
+    if err := _elem37.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem37), err)
+    }
+    p.Rows = append(p.Rows, _elem37)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TRowSet) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TColumn, 0, size)
+  p.Columns = tSlice
+  for i := 0; i < size; i ++ {
+    _elem38 := &TColumn{}
+    if err := _elem38.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem38), err)
+    }
+    p.Columns = append(p.Columns, _elem38)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TRowSet) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.BinaryColumns = v
+}
+  return nil
+}
+
+func (p *TRowSet) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.ColumnCount = &v
+}
+  return nil
+}
+
+func (p *TRowSet) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TSparkArrowBatch, 0, size)
+  p.ArrowBatches = tSlice
+  for i := 0; i < size; i ++ {
+    _elem39 := &TSparkArrowBatch{}
+    if err := _elem39.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem39), err)
+    }
+    p.ArrowBatches = append(p.ArrowBatches, _elem39)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TRowSet) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TSparkArrowResultLink, 0, size)
+  p.ResultLinks = tSlice
+  for i := 0; i < size; i ++ {
+    _elem40 := &TSparkArrowResultLink{}
+    if err := _elem40.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem40), err)
+    }
+    p.ResultLinks = append(p.ResultLinks, _elem40)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TRowSet) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TDBSqlCloudResultFile, 0, size)
+  p.CloudFetchResults = tSlice
+  for i := 0; i < size; i ++ {
+    _elem41 := &TDBSqlCloudResultFile{}
+    if err := _elem41.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem41), err)
+    }
+    p.CloudFetchResults = append(p.CloudFetchResults, _elem41)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TRowSet) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TRowSet"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TRowSet) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "startRowOffset", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:startRowOffset: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.StartRowOffset)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.startRowOffset (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:startRowOffset: ", p), err) }
+  return err
+}
+
+func (p *TRowSet) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "rows", thrift.LIST, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:rows: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Rows)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Rows {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:rows: ", p), err) }
+  return err
+}
+
+func (p *TRowSet) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetColumns() {
+    if err := oprot.WriteFieldBegin(ctx, "columns", thrift.LIST, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:columns: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Columns)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Columns {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:columns: ", p), err) }
+  }
+  return err
+}
+
+func (p *TRowSet) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetBinaryColumns() {
+    if err := oprot.WriteFieldBegin(ctx, "binaryColumns", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:binaryColumns: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.BinaryColumns); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.binaryColumns (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:binaryColumns: ", p), err) }
+  }
+  return err
+}
+
+func (p *TRowSet) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetColumnCount() {
+    if err := oprot.WriteFieldBegin(ctx, "columnCount", thrift.I32, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:columnCount: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.ColumnCount)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.columnCount (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:columnCount: ", p), err) }
+  }
+  return err
+}
+
+func (p *TRowSet) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetArrowBatches() {
+    if err := oprot.WriteFieldBegin(ctx, "arrowBatches", thrift.LIST, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:arrowBatches: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ArrowBatches)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.ArrowBatches {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:arrowBatches: ", p), err) }
+  }
+  return err
+}
+
+func (p *TRowSet) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultLinks() {
+    if err := oprot.WriteFieldBegin(ctx, "resultLinks", thrift.LIST, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:resultLinks: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ResultLinks)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v :=
range p.ResultLinks { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:resultLinks: ", p), err) } + } + return err +} + +func (p *TRowSet) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCloudFetchResults() { + if err := oprot.WriteFieldBegin(ctx, "cloudFetchResults", thrift.LIST, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:cloudFetchResults: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.CloudFetchResults)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.CloudFetchResults { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:cloudFetchResults: ", p), err) } + } + return err +} + +func (p *TRowSet) Equals(other *TRowSet) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.StartRowOffset != other.StartRowOffset { return false } + if len(p.Rows) != len(other.Rows) { return false } + for i, _tgt := range p.Rows { + _src42 := other.Rows[i] + if !_tgt.Equals(_src42) { return false } + } + if len(p.Columns) != len(other.Columns) { return false } + for i, _tgt := range p.Columns { + _src43 := other.Columns[i] + if !_tgt.Equals(_src43) { return false } + } + if bytes.Compare(p.BinaryColumns, other.BinaryColumns) != 0 { return false } + if p.ColumnCount != other.ColumnCount { + if p.ColumnCount == nil || other.ColumnCount == nil { + return false + } + if (*p.ColumnCount) != (*other.ColumnCount) { return false } + } + if len(p.ArrowBatches) != len(other.ArrowBatches) { return false } + for i, _tgt := range p.ArrowBatches { + _src44 := other.ArrowBatches[i] + if !_tgt.Equals(_src44) { return false } + } + if len(p.ResultLinks) != len(other.ResultLinks) { return false } + for i, _tgt := range p.ResultLinks { + _src45 := other.ResultLinks[i] + if !_tgt.Equals(_src45) { return false } + } + if len(p.CloudFetchResults) != len(other.CloudFetchResults) { return false } + for i, _tgt := range p.CloudFetchResults { + _src46 := other.CloudFetchResults[i] + if !_tgt.Equals(_src46) { return false } + } + return true +} + +func (p *TRowSet) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRowSet(%+v)", *p) +} + +func (p *TRowSet) Validate() error { + return nil +} +// Attributes: +// - Name +// - SqlStatement +// - Properties +// - ViewSchema +type TDBSqlTempView struct { + Name *string `thrift:"name,1" db:"name" json:"name,omitempty"` + SqlStatement *string `thrift:"sqlStatement,2" db:"sqlStatement" json:"sqlStatement,omitempty"` + Properties map[string]string `thrift:"properties,3" db:"properties" json:"properties,omitempty"` + ViewSchema *string `thrift:"viewSchema,4" db:"viewSchema" json:"viewSchema,omitempty"` +} + +func NewTDBSqlTempView() *TDBSqlTempView { + return &TDBSqlTempView{} +} + +var TDBSqlTempView_Name_DEFAULT string +func 
(p *TDBSqlTempView) GetName() string { + if !p.IsSetName() { + return TDBSqlTempView_Name_DEFAULT + } +return *p.Name +} +var TDBSqlTempView_SqlStatement_DEFAULT string +func (p *TDBSqlTempView) GetSqlStatement() string { + if !p.IsSetSqlStatement() { + return TDBSqlTempView_SqlStatement_DEFAULT + } +return *p.SqlStatement +} +var TDBSqlTempView_Properties_DEFAULT map[string]string + +func (p *TDBSqlTempView) GetProperties() map[string]string { + return p.Properties +} +var TDBSqlTempView_ViewSchema_DEFAULT string +func (p *TDBSqlTempView) GetViewSchema() string { + if !p.IsSetViewSchema() { + return TDBSqlTempView_ViewSchema_DEFAULT + } +return *p.ViewSchema +} +func (p *TDBSqlTempView) IsSetName() bool { + return p.Name != nil +} + +func (p *TDBSqlTempView) IsSetSqlStatement() bool { + return p.SqlStatement != nil +} + +func (p *TDBSqlTempView) IsSetProperties() bool { + return p.Properties != nil +} + +func (p *TDBSqlTempView) IsSetViewSchema() bool { + return p.ViewSchema != nil +} + +func (p *TDBSqlTempView) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.MAP { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TDBSqlTempView) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Name = &v +} + return nil +} + +func (p *TDBSqlTempView) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.SqlStatement = &v +} + return nil +} + +func (p *TDBSqlTempView) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Properties = tMap + for i := 0; i < size; i ++ { +var _key47 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key47 = v +} +var _val48 
string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _val48 = v +} + p.Properties[_key47] = _val48 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TDBSqlTempView) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ViewSchema = &v +} + return nil +} + +func (p *TDBSqlTempView) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDBSqlTempView"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDBSqlTempView) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) } + } + return err +} + +func (p *TDBSqlTempView) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSqlStatement() { + if err := oprot.WriteFieldBegin(ctx, "sqlStatement", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sqlStatement: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SqlStatement)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sqlStatement (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sqlStatement: ", p), err) } + } + return err +} + +func (p *TDBSqlTempView) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetProperties() { + if err := oprot.WriteFieldBegin(ctx, "properties", thrift.MAP, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:properties: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.Properties)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Properties { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:properties: ", p), err) } + } + return err +} + +func (p *TDBSqlTempView) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetViewSchema() { + if err := oprot.WriteFieldBegin(ctx, "viewSchema", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:viewSchema: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ViewSchema)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.viewSchema (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:viewSchema: ", p), err) } + } + return err +} + +func (p *TDBSqlTempView) Equals(other *TDBSqlTempView) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Name != other.Name { + if p.Name == nil || other.Name == nil { + return false + } + if (*p.Name) != (*other.Name) { return false } + } + if p.SqlStatement != other.SqlStatement { + if p.SqlStatement == nil || other.SqlStatement == nil { + return false + } + if (*p.SqlStatement) != (*other.SqlStatement) { return false } + } + if len(p.Properties) != len(other.Properties) { return false } + for k, _tgt := range p.Properties { + _src49 := other.Properties[k] + if _tgt != _src49 { return false } + } + if p.ViewSchema != other.ViewSchema { + if p.ViewSchema == nil || other.ViewSchema == nil { + return false + } + if (*p.ViewSchema) != (*other.ViewSchema) { return false } + } + return true +} + +func (p *TDBSqlTempView) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDBSqlTempView(%+v)", *p) +} + +func (p *TDBSqlTempView) Validate() error { + return nil +} +// Attributes: +// - SupportsMultipleCatalogs +type TDBSqlSessionCapabilities struct { + SupportsMultipleCatalogs *bool `thrift:"supportsMultipleCatalogs,1" db:"supportsMultipleCatalogs" json:"supportsMultipleCatalogs,omitempty"` +} + +func NewTDBSqlSessionCapabilities() *TDBSqlSessionCapabilities { + return &TDBSqlSessionCapabilities{} +} + +var TDBSqlSessionCapabilities_SupportsMultipleCatalogs_DEFAULT bool +func (p *TDBSqlSessionCapabilities) GetSupportsMultipleCatalogs() bool { + if !p.IsSetSupportsMultipleCatalogs() { + return TDBSqlSessionCapabilities_SupportsMultipleCatalogs_DEFAULT + } +return *p.SupportsMultipleCatalogs +} +func (p *TDBSqlSessionCapabilities) IsSetSupportsMultipleCatalogs() bool { + return p.SupportsMultipleCatalogs != nil +} + +func (p *TDBSqlSessionCapabilities) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := 
iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TDBSqlSessionCapabilities) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.SupportsMultipleCatalogs = &v +} + return nil +} + +func (p *TDBSqlSessionCapabilities) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDBSqlSessionCapabilities"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDBSqlSessionCapabilities) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSupportsMultipleCatalogs() { + if err := oprot.WriteFieldBegin(ctx, "supportsMultipleCatalogs", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:supportsMultipleCatalogs: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.SupportsMultipleCatalogs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.supportsMultipleCatalogs (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:supportsMultipleCatalogs: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionCapabilities) Equals(other *TDBSqlSessionCapabilities) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.SupportsMultipleCatalogs != other.SupportsMultipleCatalogs { + if p.SupportsMultipleCatalogs == nil || other.SupportsMultipleCatalogs == nil { + return false + } + if (*p.SupportsMultipleCatalogs) != (*other.SupportsMultipleCatalogs) { return false } + } + return true +} + +func (p *TDBSqlSessionCapabilities) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDBSqlSessionCapabilities(%+v)", *p) +} + +func (p *TDBSqlSessionCapabilities) Validate() error { + return nil +} +// Attributes: +// - ClassName +// - Usage +// - Name +// - Extended +// - Db +// - Arguments +// - Examples +// - Note +// - Group +// - Since +// - Deprecated +// - Source +type TExpressionInfo struct { + ClassName *string `thrift:"className,1" db:"className" json:"className,omitempty"` + Usage *string `thrift:"usage,2" db:"usage" json:"usage,omitempty"` + Name *string `thrift:"name,3" db:"name" json:"name,omitempty"` + Extended *string `thrift:"extended,4" db:"extended" json:"extended,omitempty"` + Db *string `thrift:"db,5" db:"db" json:"db,omitempty"` + Arguments *string `thrift:"arguments,6" db:"arguments" json:"arguments,omitempty"` + Examples *string `thrift:"examples,7" db:"examples" json:"examples,omitempty"` + Note *string `thrift:"note,8" db:"note" json:"note,omitempty"` + Group *string `thrift:"group,9" db:"group" json:"group,omitempty"` + Since *string `thrift:"since,10" db:"since" json:"since,omitempty"` + Deprecated *string `thrift:"deprecated,11" db:"deprecated" json:"deprecated,omitempty"` + Source *string `thrift:"source,12" 
db:"source" json:"source,omitempty"` +} + +func NewTExpressionInfo() *TExpressionInfo { + return &TExpressionInfo{} +} + +var TExpressionInfo_ClassName_DEFAULT string +func (p *TExpressionInfo) GetClassName() string { + if !p.IsSetClassName() { + return TExpressionInfo_ClassName_DEFAULT + } +return *p.ClassName +} +var TExpressionInfo_Usage_DEFAULT string +func (p *TExpressionInfo) GetUsage() string { + if !p.IsSetUsage() { + return TExpressionInfo_Usage_DEFAULT + } +return *p.Usage +} +var TExpressionInfo_Name_DEFAULT string +func (p *TExpressionInfo) GetName() string { + if !p.IsSetName() { + return TExpressionInfo_Name_DEFAULT + } +return *p.Name +} +var TExpressionInfo_Extended_DEFAULT string +func (p *TExpressionInfo) GetExtended() string { + if !p.IsSetExtended() { + return TExpressionInfo_Extended_DEFAULT + } +return *p.Extended +} +var TExpressionInfo_Db_DEFAULT string +func (p *TExpressionInfo) GetDb() string { + if !p.IsSetDb() { + return TExpressionInfo_Db_DEFAULT + } +return *p.Db +} +var TExpressionInfo_Arguments_DEFAULT string +func (p *TExpressionInfo) GetArguments() string { + if !p.IsSetArguments() { + return TExpressionInfo_Arguments_DEFAULT + } +return *p.Arguments +} +var TExpressionInfo_Examples_DEFAULT string +func (p *TExpressionInfo) GetExamples() string { + if !p.IsSetExamples() { + return TExpressionInfo_Examples_DEFAULT + } +return *p.Examples +} +var TExpressionInfo_Note_DEFAULT string +func (p *TExpressionInfo) GetNote() string { + if !p.IsSetNote() { + return TExpressionInfo_Note_DEFAULT + } +return *p.Note +} +var TExpressionInfo_Group_DEFAULT string +func (p *TExpressionInfo) GetGroup() string { + if !p.IsSetGroup() { + return TExpressionInfo_Group_DEFAULT + } +return *p.Group +} +var TExpressionInfo_Since_DEFAULT string +func (p *TExpressionInfo) GetSince() string { + if !p.IsSetSince() { + return TExpressionInfo_Since_DEFAULT + } +return *p.Since +} +var TExpressionInfo_Deprecated_DEFAULT string +func (p *TExpressionInfo) GetDeprecated() string { + if !p.IsSetDeprecated() { + return TExpressionInfo_Deprecated_DEFAULT + } +return *p.Deprecated +} +var TExpressionInfo_Source_DEFAULT string +func (p *TExpressionInfo) GetSource() string { + if !p.IsSetSource() { + return TExpressionInfo_Source_DEFAULT + } +return *p.Source +} +func (p *TExpressionInfo) IsSetClassName() bool { + return p.ClassName != nil +} + +func (p *TExpressionInfo) IsSetUsage() bool { + return p.Usage != nil +} + +func (p *TExpressionInfo) IsSetName() bool { + return p.Name != nil +} + +func (p *TExpressionInfo) IsSetExtended() bool { + return p.Extended != nil +} + +func (p *TExpressionInfo) IsSetDb() bool { + return p.Db != nil +} + +func (p *TExpressionInfo) IsSetArguments() bool { + return p.Arguments != nil +} + +func (p *TExpressionInfo) IsSetExamples() bool { + return p.Examples != nil +} + +func (p *TExpressionInfo) IsSetNote() bool { + return p.Note != nil +} + +func (p *TExpressionInfo) IsSetGroup() bool { + return p.Group != nil +} + +func (p *TExpressionInfo) IsSetSince() bool { + return p.Since != nil +} + +func (p *TExpressionInfo) IsSetDeprecated() bool { + return p.Deprecated != nil +} + +func (p *TExpressionInfo) IsSetSource() bool { + return p.Source != nil +} + +func (p *TExpressionInfo) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRING { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.STRING { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.STRING { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TExpressionInfo) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.ClassName = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Usage = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error 
reading field 3: ", err) +} else { + p.Name = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.Extended = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.Db = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.Arguments = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.Examples = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) +} else { + p.Note = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.Group = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) +} else { + p.Since = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) +} else { + p.Deprecated = &v +} + return nil +} + +func (p *TExpressionInfo) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) +} else { + p.Source = &v +} + return nil +} + +func (p *TExpressionInfo) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TExpressionInfo"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + if err := p.writeField11(ctx, oprot); err != nil { return err } + if err := p.writeField12(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return 
thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TExpressionInfo) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetClassName() { + if err := oprot.WriteFieldBegin(ctx, "className", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:className: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ClassName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.className (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:className: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetUsage() { + if err := oprot.WriteFieldBegin(ctx, "usage", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:usage: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Usage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.usage (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:usage: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetExtended() { + if err := oprot.WriteFieldBegin(ctx, "extended", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:extended: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Extended)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.extended (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:extended: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDb() { + if err := oprot.WriteFieldBegin(ctx, "db", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:db: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Db)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.db (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:db: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetArguments() { + if err := oprot.WriteFieldBegin(ctx, "arguments", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:arguments: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Arguments)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.arguments (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:arguments: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetExamples() { + if err := oprot.WriteFieldBegin(ctx, "examples", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:examples: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Examples)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.examples (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:examples: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetNote() { + if err := oprot.WriteFieldBegin(ctx, "note", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:note: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Note)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.note (8) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:note: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGroup() { + if err := oprot.WriteFieldBegin(ctx, "group", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:group: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Group)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.group (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:group: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSince() { + if err := oprot.WriteFieldBegin(ctx, "since", thrift.STRING, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:since: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Since)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.since (10) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:since: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDeprecated() { + if err := oprot.WriteFieldBegin(ctx, "deprecated", thrift.STRING, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:deprecated: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Deprecated)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.deprecated (11) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:deprecated: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSource() { + if err := oprot.WriteFieldBegin(ctx, "source", thrift.STRING, 12); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:source: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Source)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.source (12) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:source: ", p), err) } + } + return err +} + +func (p *TExpressionInfo) Equals(other *TExpressionInfo) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ClassName != other.ClassName { + if p.ClassName == nil || other.ClassName == nil { + return false + } + if (*p.ClassName) != (*other.ClassName) { return false } + } + if p.Usage != other.Usage { + if p.Usage == nil || other.Usage == nil { + return false + } + if (*p.Usage) != (*other.Usage) { return false } + } + if p.Name != other.Name { + if p.Name == nil || other.Name == nil { + return false + } + if (*p.Name) != (*other.Name) { return false } + } + if p.Extended != other.Extended { + if p.Extended == nil || other.Extended == nil { + return false + } + if (*p.Extended) != (*other.Extended) { return false } + } + if p.Db != other.Db { + if p.Db == nil || other.Db == nil { + return false + } + if (*p.Db) != (*other.Db) { return false } + } + if p.Arguments != other.Arguments { + if p.Arguments == nil || other.Arguments == nil { + return false + } + if (*p.Arguments) != (*other.Arguments) { return false } + } + if p.Examples != other.Examples { + if p.Examples == nil || other.Examples == nil { + return false + } + if (*p.Examples) != (*other.Examples) { return false } + } + if p.Note != other.Note { + if p.Note == nil || other.Note == nil { + return false + } + if (*p.Note) != (*other.Note) { return false } + } + if p.Group != other.Group { + if p.Group == nil || other.Group == nil { + return false + } + if (*p.Group) != (*other.Group) { return false } + } + if p.Since != other.Since { + if p.Since == nil || other.Since == nil { + return false + } + if (*p.Since) != (*other.Since) { return false } + } + if p.Deprecated != other.Deprecated { + if p.Deprecated == nil || other.Deprecated == nil { + return false + } + if (*p.Deprecated) != (*other.Deprecated) { return false } + } + if p.Source != other.Source { + if p.Source == nil || other.Source == nil { + return false + } + if (*p.Source) != (*other.Source) { return false } + } + return true +} + +func (p *TExpressionInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TExpressionInfo(%+v)", *p) +} + +func (p *TExpressionInfo) Validate() error { + return nil +} +// Attributes: +// - Value +type TDBSqlConfValue struct { + Value *string `thrift:"value,1" db:"value" json:"value,omitempty"` +} + +func NewTDBSqlConfValue() *TDBSqlConfValue { + return &TDBSqlConfValue{} +} + +var TDBSqlConfValue_Value_DEFAULT string +func (p *TDBSqlConfValue) GetValue() string { + if !p.IsSetValue() { + return TDBSqlConfValue_Value_DEFAULT + } +return *p.Value +} +func (p *TDBSqlConfValue) IsSetValue() bool { + return p.Value != nil +} + +func (p *TDBSqlConfValue) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + 
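// Generated field-dispatch loop: field IDs not handled below, and fields whose wire type does not match the schema, are skipped rather than treated as errors, so decoding tolerates schema additions. + 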
switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TDBSqlConfValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Value = &v +} + return nil +} + +func (p *TDBSqlConfValue) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDBSqlConfValue"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDBSqlConfValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetValue() { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) } + } + return err +} + +func (p *TDBSqlConfValue) Equals(other *TDBSqlConfValue) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Value != other.Value { + if p.Value == nil || other.Value == nil { + return false + } + if (*p.Value) != (*other.Value) { return false } + } + return true +} + +func (p *TDBSqlConfValue) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDBSqlConfValue(%+v)", *p) +} + +func (p *TDBSqlConfValue) Validate() error { + return nil +} +// Attributes: +// - Confs +// - TempViews +// - CurrentDatabase +// - CurrentCatalog +// - SessionCapabilities +// - ExpressionsInfos +// - InternalConfs +type TDBSqlSessionConf struct { + Confs map[string]string `thrift:"confs,1" db:"confs" json:"confs,omitempty"` + TempViews []*TDBSqlTempView `thrift:"tempViews,2" db:"tempViews" json:"tempViews,omitempty"` + CurrentDatabase *string `thrift:"currentDatabase,3" db:"currentDatabase" json:"currentDatabase,omitempty"` + CurrentCatalog *string `thrift:"currentCatalog,4" db:"currentCatalog" json:"currentCatalog,omitempty"` + SessionCapabilities *TDBSqlSessionCapabilities `thrift:"sessionCapabilities,5" db:"sessionCapabilities" json:"sessionCapabilities,omitempty"` + ExpressionsInfos []*TExpressionInfo `thrift:"expressionsInfos,6" db:"expressionsInfos" json:"expressionsInfos,omitempty"` + InternalConfs map[string]*TDBSqlConfValue `thrift:"internalConfs,7" db:"internalConfs" json:"internalConfs,omitempty"` +} + +func NewTDBSqlSessionConf() *TDBSqlSessionConf { + return &TDBSqlSessionConf{} +} + +var 
TDBSqlSessionConf_Confs_DEFAULT map[string]string + +func (p *TDBSqlSessionConf) GetConfs() map[string]string { + return p.Confs +} +var TDBSqlSessionConf_TempViews_DEFAULT []*TDBSqlTempView + +func (p *TDBSqlSessionConf) GetTempViews() []*TDBSqlTempView { + return p.TempViews +} +var TDBSqlSessionConf_CurrentDatabase_DEFAULT string +func (p *TDBSqlSessionConf) GetCurrentDatabase() string { + if !p.IsSetCurrentDatabase() { + return TDBSqlSessionConf_CurrentDatabase_DEFAULT + } +return *p.CurrentDatabase +} +var TDBSqlSessionConf_CurrentCatalog_DEFAULT string +func (p *TDBSqlSessionConf) GetCurrentCatalog() string { + if !p.IsSetCurrentCatalog() { + return TDBSqlSessionConf_CurrentCatalog_DEFAULT + } +return *p.CurrentCatalog +} +var TDBSqlSessionConf_SessionCapabilities_DEFAULT *TDBSqlSessionCapabilities +func (p *TDBSqlSessionConf) GetSessionCapabilities() *TDBSqlSessionCapabilities { + if !p.IsSetSessionCapabilities() { + return TDBSqlSessionConf_SessionCapabilities_DEFAULT + } +return p.SessionCapabilities +} +var TDBSqlSessionConf_ExpressionsInfos_DEFAULT []*TExpressionInfo + +func (p *TDBSqlSessionConf) GetExpressionsInfos() []*TExpressionInfo { + return p.ExpressionsInfos +} +var TDBSqlSessionConf_InternalConfs_DEFAULT map[string]*TDBSqlConfValue + +func (p *TDBSqlSessionConf) GetInternalConfs() map[string]*TDBSqlConfValue { + return p.InternalConfs +} +func (p *TDBSqlSessionConf) IsSetConfs() bool { + return p.Confs != nil +} + +func (p *TDBSqlSessionConf) IsSetTempViews() bool { + return p.TempViews != nil +} + +func (p *TDBSqlSessionConf) IsSetCurrentDatabase() bool { + return p.CurrentDatabase != nil +} + +func (p *TDBSqlSessionConf) IsSetCurrentCatalog() bool { + return p.CurrentCatalog != nil +} + +func (p *TDBSqlSessionConf) IsSetSessionCapabilities() bool { + return p.SessionCapabilities != nil +} + +func (p *TDBSqlSessionConf) IsSetExpressionsInfos() bool { + return p.ExpressionsInfos != nil +} + +func (p *TDBSqlSessionConf) IsSetInternalConfs() bool { + return p.InternalConfs != nil +} + +func (p *TDBSqlSessionConf) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := 
p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.MAP { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TDBSqlSessionConf) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Confs = tMap + for i := 0; i < size; i ++ { +var _key50 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key50 = v +} +var _val51 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _val51 = v +} + p.Confs[_key50] = _val51 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TDBSqlSessionConf) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*TDBSqlTempView, 0, size) + p.TempViews = tSlice + for i := 0; i < size; i ++ { + _elem52 := &TDBSqlTempView{} + if err := _elem52.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem52), err) + } + p.TempViews = append(p.TempViews, _elem52) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TDBSqlSessionConf) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.CurrentDatabase = &v +} + return nil +} + +func (p *TDBSqlSessionConf) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.CurrentCatalog = &v +} + return nil +} + +func (p *TDBSqlSessionConf) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionCapabilities = &TDBSqlSessionCapabilities{} + if err := p.SessionCapabilities.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionCapabilities), err) + } + return nil +} + +func (p *TDBSqlSessionConf) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*TExpressionInfo, 0, size) + p.ExpressionsInfos = tSlice + for i := 0; i < size; i ++ { + _elem53 := &TExpressionInfo{} + if err := _elem53.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem53), err) + } + p.ExpressionsInfos = append(p.ExpressionsInfos, _elem53) + } + if err := 
iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TDBSqlSessionConf) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]*TDBSqlConfValue, size) + p.InternalConfs = tMap + for i := 0; i < size; i ++ { +var _key54 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key54 = v +} + _val55 := &TDBSqlConfValue{} + if err := _val55.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val55), err) + } + p.InternalConfs[_key54] = _val55 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TDBSqlSessionConf) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TDBSqlSessionConf"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TDBSqlSessionConf) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetConfs() { + if err := oprot.WriteFieldBegin(ctx, "confs", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:confs: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.Confs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Confs { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:confs: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTempViews() { + if err := oprot.WriteFieldBegin(ctx, "tempViews", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tempViews: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.TempViews)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.TempViews { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tempViews: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentDatabase() { + if err := oprot.WriteFieldBegin(ctx, "currentDatabase", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:currentDatabase: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CurrentDatabase)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.currentDatabase (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:currentDatabase: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCurrentCatalog() { + if err := oprot.WriteFieldBegin(ctx, "currentCatalog", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:currentCatalog: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CurrentCatalog)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.currentCatalog (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:currentCatalog: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionCapabilities() { + if err := oprot.WriteFieldBegin(ctx, "sessionCapabilities", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:sessionCapabilities: ", p), err) } + if err := p.SessionCapabilities.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionCapabilities), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:sessionCapabilities: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetExpressionsInfos() { + if err := oprot.WriteFieldBegin(ctx, "expressionsInfos", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
6:expressionsInfos: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ExpressionsInfos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.ExpressionsInfos { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:expressionsInfos: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetInternalConfs() { + if err := oprot.WriteFieldBegin(ctx, "internalConfs", thrift.MAP, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:internalConfs: ", p), err) } + if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRUCT, len(p.InternalConfs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.InternalConfs { + if err := oprot.WriteString(ctx, string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(ctx); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:internalConfs: ", p), err) } + } + return err +} + +func (p *TDBSqlSessionConf) Equals(other *TDBSqlSessionConf) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.Confs) != len(other.Confs) { return false } + for k, _tgt := range p.Confs { + _src56 := other.Confs[k] + if _tgt != _src56 { return false } + } + if len(p.TempViews) != len(other.TempViews) { return false } + for i, _tgt := range p.TempViews { + _src57 := other.TempViews[i] + if !_tgt.Equals(_src57) { return false } + } + if p.CurrentDatabase != other.CurrentDatabase { + if p.CurrentDatabase == nil || other.CurrentDatabase == nil { + return false + } + if (*p.CurrentDatabase) != (*other.CurrentDatabase) { return false } + } + if p.CurrentCatalog != other.CurrentCatalog { + if p.CurrentCatalog == nil || other.CurrentCatalog == nil { + return false + } + if (*p.CurrentCatalog) != (*other.CurrentCatalog) { return false } + } + if !p.SessionCapabilities.Equals(other.SessionCapabilities) { return false } + if len(p.ExpressionsInfos) != len(other.ExpressionsInfos) { return false } + for i, _tgt := range p.ExpressionsInfos { + _src58 := other.ExpressionsInfos[i] + if !_tgt.Equals(_src58) { return false } + } + if len(p.InternalConfs) != len(other.InternalConfs) { return false } + for k, _tgt := range p.InternalConfs { + _src59 := other.InternalConfs[k] + if !_tgt.Equals(_src59) { return false } + } + return true +} + +func (p *TDBSqlSessionConf) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TDBSqlSessionConf(%+v)", *p) +} + +func (p *TDBSqlSessionConf) Validate() error { + return nil +} +// Attributes: +// - StatusCode +// - InfoMessages +// - SqlState +// - ErrorCode +// - ErrorMessage +// - DisplayMessage +// - ErrorDetailsJson +// - ResponseValidation +type TStatus struct { + StatusCode 
TStatusCode `thrift:"statusCode,1,required" db:"statusCode" json:"statusCode"` + InfoMessages []string `thrift:"infoMessages,2" db:"infoMessages" json:"infoMessages,omitempty"` + SqlState *string `thrift:"sqlState,3" db:"sqlState" json:"sqlState,omitempty"` + ErrorCode *int32 `thrift:"errorCode,4" db:"errorCode" json:"errorCode,omitempty"` + ErrorMessage *string `thrift:"errorMessage,5" db:"errorMessage" json:"errorMessage,omitempty"` + DisplayMessage *string `thrift:"displayMessage,6" db:"displayMessage" json:"displayMessage,omitempty"` + // unused fields # 7 to 1280 + ErrorDetailsJson *string `thrift:"errorDetailsJson,1281" db:"errorDetailsJson" json:"errorDetailsJson,omitempty"` + // unused fields # 1282 to 3328 + ResponseValidation []byte `thrift:"responseValidation,3329" db:"responseValidation" json:"responseValidation,omitempty"` +} + +func NewTStatus() *TStatus { + return &TStatus{} +} + + +func (p *TStatus) GetStatusCode() TStatusCode { + return p.StatusCode +} +var TStatus_InfoMessages_DEFAULT []string + +func (p *TStatus) GetInfoMessages() []string { + return p.InfoMessages +} +var TStatus_SqlState_DEFAULT string +func (p *TStatus) GetSqlState() string { + if !p.IsSetSqlState() { + return TStatus_SqlState_DEFAULT + } +return *p.SqlState +} +var TStatus_ErrorCode_DEFAULT int32 +func (p *TStatus) GetErrorCode() int32 { + if !p.IsSetErrorCode() { + return TStatus_ErrorCode_DEFAULT + } +return *p.ErrorCode +} +var TStatus_ErrorMessage_DEFAULT string +func (p *TStatus) GetErrorMessage() string { + if !p.IsSetErrorMessage() { + return TStatus_ErrorMessage_DEFAULT + } +return *p.ErrorMessage +} +var TStatus_DisplayMessage_DEFAULT string +func (p *TStatus) GetDisplayMessage() string { + if !p.IsSetDisplayMessage() { + return TStatus_DisplayMessage_DEFAULT + } +return *p.DisplayMessage +} +var TStatus_ErrorDetailsJson_DEFAULT string +func (p *TStatus) GetErrorDetailsJson() string { + if !p.IsSetErrorDetailsJson() { + return TStatus_ErrorDetailsJson_DEFAULT + } +return *p.ErrorDetailsJson +} +var TStatus_ResponseValidation_DEFAULT []byte + +func (p *TStatus) GetResponseValidation() []byte { + return p.ResponseValidation +} +func (p *TStatus) IsSetInfoMessages() bool { + return p.InfoMessages != nil +} + +func (p *TStatus) IsSetSqlState() bool { + return p.SqlState != nil +} + +func (p *TStatus) IsSetErrorCode() bool { + return p.ErrorCode != nil +} + +func (p *TStatus) IsSetErrorMessage() bool { + return p.ErrorMessage != nil +} + +func (p *TStatus) IsSetDisplayMessage() bool { + return p.DisplayMessage != nil +} + +func (p *TStatus) IsSetErrorDetailsJson() bool { + return p.ErrorDetailsJson != nil +} + +func (p *TStatus) IsSetResponseValidation() bool { + return p.ResponseValidation != nil +} + +func (p *TStatus) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatusCode bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatusCode = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + 
return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatusCode{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StatusCode is not set")); + } + return nil +} + +func (p *TStatus) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TStatusCode(v) + p.StatusCode = temp +} + return nil +} + +func (p *TStatus) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.InfoMessages = tSlice + for i := 0; i < size; i ++ { +var _elem60 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem60 = v +} + p.InfoMessages = append(p.InfoMessages, _elem60) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TStatus) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SqlState = &v +} + return nil +} + +func (p *TStatus) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ErrorCode = &v +} + return nil +} + +func (p *TStatus) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.ErrorMessage = &v +} + return nil +} + +func (p *TStatus) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); 
err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.DisplayMessage = &v +} + return nil +} + +func (p *TStatus) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1281: ", err) +} else { + p.ErrorDetailsJson = &v +} + return nil +} + +func (p *TStatus) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.ResponseValidation = v +} + return nil +} + +func (p *TStatus) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TStatus"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TStatus) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "statusCode", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:statusCode: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.StatusCode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.statusCode (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:statusCode: ", p), err) } + return err +} + +func (p *TStatus) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetInfoMessages() { + if err := oprot.WriteFieldBegin(ctx, "infoMessages", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infoMessages: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.InfoMessages)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.InfoMessages { + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infoMessages: ", p), err) } + } + return err +} + +func (p *TStatus) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSqlState() { + if err := oprot.WriteFieldBegin(ctx, "sqlState", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:sqlState: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SqlState)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sqlState (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:sqlState: ", p), err) } + } + return err +} + +func (p *TStatus) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorCode() { + if err := oprot.WriteFieldBegin(ctx, "errorCode", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:errorCode: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.ErrorCode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorCode (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:errorCode: ", p), err) } + } + return err +} + +func (p *TStatus) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorMessage() { + if err := oprot.WriteFieldBegin(ctx, "errorMessage", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:errorMessage: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ErrorMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorMessage (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:errorMessage: ", p), err) } + } + return err +} + +func (p *TStatus) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDisplayMessage() { + if err := oprot.WriteFieldBegin(ctx, "displayMessage", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:displayMessage: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.DisplayMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.displayMessage (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:displayMessage: ", p), err) } + } + return err +} + +func (p *TStatus) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorDetailsJson() { + if err := oprot.WriteFieldBegin(ctx, "errorDetailsJson", thrift.STRING, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:errorDetailsJson: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ErrorDetailsJson)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorDetailsJson (1281) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:errorDetailsJson: ", p), err) } + } + return err +} + +func 
(p *TStatus) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetResponseValidation() { + if err := oprot.WriteFieldBegin(ctx, "responseValidation", thrift.STRING, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:responseValidation: ", p), err) } + if err := oprot.WriteBinary(ctx, p.ResponseValidation); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.responseValidation (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:responseValidation: ", p), err) } + } + return err +} + +func (p *TStatus) Equals(other *TStatus) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.StatusCode != other.StatusCode { return false } + if len(p.InfoMessages) != len(other.InfoMessages) { return false } + for i, _tgt := range p.InfoMessages { + _src61 := other.InfoMessages[i] + if _tgt != _src61 { return false } + } + if p.SqlState != other.SqlState { + if p.SqlState == nil || other.SqlState == nil { + return false + } + if (*p.SqlState) != (*other.SqlState) { return false } + } + if p.ErrorCode != other.ErrorCode { + if p.ErrorCode == nil || other.ErrorCode == nil { + return false + } + if (*p.ErrorCode) != (*other.ErrorCode) { return false } + } + if p.ErrorMessage != other.ErrorMessage { + if p.ErrorMessage == nil || other.ErrorMessage == nil { + return false + } + if (*p.ErrorMessage) != (*other.ErrorMessage) { return false } + } + if p.DisplayMessage != other.DisplayMessage { + if p.DisplayMessage == nil || other.DisplayMessage == nil { + return false + } + if (*p.DisplayMessage) != (*other.DisplayMessage) { return false } + } + if p.ErrorDetailsJson != other.ErrorDetailsJson { + if p.ErrorDetailsJson == nil || other.ErrorDetailsJson == nil { + return false + } + if (*p.ErrorDetailsJson) != (*other.ErrorDetailsJson) { return false } + } + if bytes.Compare(p.ResponseValidation, other.ResponseValidation) != 0 { return false } + return true +} + +func (p *TStatus) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TStatus(%+v)", *p) +} + +func (p *TStatus) Validate() error { + return nil +} +// Attributes: +// - CatalogName +// - SchemaName +type TNamespace struct { + CatalogName *TIdentifier `thrift:"catalogName,1" db:"catalogName" json:"catalogName,omitempty"` + SchemaName *TIdentifier `thrift:"schemaName,2" db:"schemaName" json:"schemaName,omitempty"` +} + +func NewTNamespace() *TNamespace { + return &TNamespace{} +} + +var TNamespace_CatalogName_DEFAULT TIdentifier +func (p *TNamespace) GetCatalogName() TIdentifier { + if !p.IsSetCatalogName() { + return TNamespace_CatalogName_DEFAULT + } +return *p.CatalogName +} +var TNamespace_SchemaName_DEFAULT TIdentifier +func (p *TNamespace) GetSchemaName() TIdentifier { + if !p.IsSetSchemaName() { + return TNamespace_SchemaName_DEFAULT + } +return *p.SchemaName +} +func (p *TNamespace) IsSetCatalogName() bool { + return p.CatalogName != nil +} + +func (p *TNamespace) IsSetSchemaName() bool { + return p.SchemaName != nil +} + +func (p *TNamespace) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, 
fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TNamespace) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TIdentifier(v) + p.CatalogName = &temp +} + return nil +} + +func (p *TNamespace) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TIdentifier(v) + p.SchemaName = &temp +} + return nil +} + +func (p *TNamespace) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TNamespace"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TNamespace) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogName() { + if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:catalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.catalogName (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:catalogName: ", p), err) } + } + return err +} + +func (p *TNamespace) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:schemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.schemaName (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:schemaName: ", p), err) } + } + return err +} + +func (p *TNamespace) Equals(other *TNamespace) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.CatalogName != other.CatalogName { + if p.CatalogName == nil || other.CatalogName == nil { + return false + } + if (*p.CatalogName) != 
(*other.CatalogName) { return false } + } + if p.SchemaName != other.SchemaName { + if p.SchemaName == nil || other.SchemaName == nil { + return false + } + if (*p.SchemaName) != (*other.SchemaName) { return false } + } + return true +} + +func (p *TNamespace) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TNamespace(%+v)", *p) +} + +func (p *TNamespace) Validate() error { + return nil +} +// Attributes: +// - GUID +// - Secret +// - ExecutionVersion +type THandleIdentifier struct { + GUID []byte `thrift:"guid,1,required" db:"guid" json:"guid"` + Secret []byte `thrift:"secret,2,required" db:"secret" json:"secret"` + // unused fields # 3 to 3328 + ExecutionVersion *int16 `thrift:"executionVersion,3329" db:"executionVersion" json:"executionVersion,omitempty"` +} + +func NewTHandleIdentifier() *THandleIdentifier { + return &THandleIdentifier{} +} + + +func (p *THandleIdentifier) GetGUID() []byte { + return p.GUID +} + +func (p *THandleIdentifier) GetSecret() []byte { + return p.Secret +} +var THandleIdentifier_ExecutionVersion_DEFAULT int16 +func (p *THandleIdentifier) GetExecutionVersion() int16 { + if !p.IsSetExecutionVersion() { + return THandleIdentifier_ExecutionVersion_DEFAULT + } +return *p.ExecutionVersion +} +func (p *THandleIdentifier) IsSetExecutionVersion() bool { + return p.ExecutionVersion != nil +} + +func (p *THandleIdentifier) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetGUID bool = false; + var issetSecret bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetGUID = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetSecret = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.I16 { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetGUID{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field GUID is not set")); + } + if !issetSecret{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Secret is not set")); + } + return nil +} + +func (p *THandleIdentifier) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.GUID = v +} + return nil +} + +func (p *THandleIdentifier) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 
2: ", err) +} else { + p.Secret = v +} + return nil +} + +func (p *THandleIdentifier) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.ExecutionVersion = &v +} + return nil +} + +func (p *THandleIdentifier) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "THandleIdentifier"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *THandleIdentifier) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "guid", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:guid: ", p), err) } + if err := oprot.WriteBinary(ctx, p.GUID); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.guid (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:guid: ", p), err) } + return err +} + +func (p *THandleIdentifier) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "secret", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:secret: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Secret); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.secret (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:secret: ", p), err) } + return err +} + +func (p *THandleIdentifier) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetExecutionVersion() { + if err := oprot.WriteFieldBegin(ctx, "executionVersion", thrift.I16, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:executionVersion: ", p), err) } + if err := oprot.WriteI16(ctx, int16(*p.ExecutionVersion)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.executionVersion (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:executionVersion: ", p), err) } + } + return err +} + +func (p *THandleIdentifier) Equals(other *THandleIdentifier) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if bytes.Compare(p.GUID, other.GUID) != 0 { return false } + if bytes.Compare(p.Secret, other.Secret) != 0 { return false } + if p.ExecutionVersion != other.ExecutionVersion { + if p.ExecutionVersion == nil || other.ExecutionVersion == nil { + return false + } + if (*p.ExecutionVersion) != (*other.ExecutionVersion) { return false } + } + return true +} + +func (p *THandleIdentifier) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("THandleIdentifier(%+v)", *p) +} + +func (p 
*THandleIdentifier) Validate() error { + return nil +} +// Attributes: +// - SessionId +// - ServerProtocolVersion +type TSessionHandle struct { + SessionId *THandleIdentifier `thrift:"sessionId,1,required" db:"sessionId" json:"sessionId"` + // unused fields # 2 to 3328 + ServerProtocolVersion *TProtocolVersion `thrift:"serverProtocolVersion,3329" db:"serverProtocolVersion" json:"serverProtocolVersion,omitempty"` +} + +func NewTSessionHandle() *TSessionHandle { + return &TSessionHandle{} +} + +var TSessionHandle_SessionId_DEFAULT *THandleIdentifier +func (p *TSessionHandle) GetSessionId() *THandleIdentifier { + if !p.IsSetSessionId() { + return TSessionHandle_SessionId_DEFAULT + } +return p.SessionId +} +var TSessionHandle_ServerProtocolVersion_DEFAULT TProtocolVersion +func (p *TSessionHandle) GetServerProtocolVersion() TProtocolVersion { + if !p.IsSetServerProtocolVersion() { + return TSessionHandle_ServerProtocolVersion_DEFAULT + } +return *p.ServerProtocolVersion +} +func (p *TSessionHandle) IsSetSessionId() bool { + return p.SessionId != nil +} + +func (p *TSessionHandle) IsSetServerProtocolVersion() bool { + return p.ServerProtocolVersion != nil +} + +func (p *TSessionHandle) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionId bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionId{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionId is not set")); + } + return nil +} + +func (p *TSessionHandle) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionId = &THandleIdentifier{} + if err := p.SessionId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionId), err) + } + return nil +} + +func (p *TSessionHandle) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + temp := TProtocolVersion(v) + p.ServerProtocolVersion = &temp +} + return nil +} + +func (p *TSessionHandle) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TSessionHandle"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := 
oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TSessionHandle) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionId", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionId: ", p), err) } + if err := p.SessionId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionId: ", p), err) } + return err +} + +func (p *TSessionHandle) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetServerProtocolVersion() { + if err := oprot.WriteFieldBegin(ctx, "serverProtocolVersion", thrift.I32, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:serverProtocolVersion: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.ServerProtocolVersion)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serverProtocolVersion (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:serverProtocolVersion: ", p), err) } + } + return err +} + +func (p *TSessionHandle) Equals(other *TSessionHandle) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionId.Equals(other.SessionId) { return false } + if p.ServerProtocolVersion != other.ServerProtocolVersion { + if p.ServerProtocolVersion == nil || other.ServerProtocolVersion == nil { + return false + } + if (*p.ServerProtocolVersion) != (*other.ServerProtocolVersion) { return false } + } + return true +} + +func (p *TSessionHandle) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSessionHandle(%+v)", *p) +} + +func (p *TSessionHandle) Validate() error { + return nil +} +// Attributes: +// - OperationId +// - OperationType +// - HasResultSet +// - ModifiedRowCount +type TOperationHandle struct { + OperationId *THandleIdentifier `thrift:"operationId,1,required" db:"operationId" json:"operationId"` + OperationType TOperationType `thrift:"operationType,2,required" db:"operationType" json:"operationType"` + HasResultSet bool `thrift:"hasResultSet,3,required" db:"hasResultSet" json:"hasResultSet"` + ModifiedRowCount *float64 `thrift:"modifiedRowCount,4" db:"modifiedRowCount" json:"modifiedRowCount,omitempty"` +} + +func NewTOperationHandle() *TOperationHandle { + return &TOperationHandle{} +} + +var TOperationHandle_OperationId_DEFAULT *THandleIdentifier +func (p *TOperationHandle) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TOperationHandle_OperationId_DEFAULT + } +return p.OperationId +} + +func (p *TOperationHandle) GetOperationType() TOperationType { + return p.OperationType +} + +func (p *TOperationHandle) GetHasResultSet() bool { + return p.HasResultSet +} +var TOperationHandle_ModifiedRowCount_DEFAULT float64 +func (p *TOperationHandle) GetModifiedRowCount() float64 { + if !p.IsSetModifiedRowCount() { + return TOperationHandle_ModifiedRowCount_DEFAULT + } +return *p.ModifiedRowCount +} +func (p *TOperationHandle) IsSetOperationId() bool { + return 
p.OperationId != nil +} + +func (p *TOperationHandle) IsSetModifiedRowCount() bool { + return p.ModifiedRowCount != nil +} + +func (p *TOperationHandle) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationId bool = false; + var issetOperationType bool = false; + var issetHasResultSet bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetOperationType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetHasResultSet = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationId{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationId is not set")); + } + if !issetOperationType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationType is not set")); + } + if !issetHasResultSet{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field HasResultSet is not set")); + } + return nil +} + +func (p *TOperationHandle) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TOperationHandle) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TOperationType(v) + p.OperationType = temp +} + return nil +} + +func (p *TOperationHandle) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.HasResultSet = v +} + return nil +} + +func (p *TOperationHandle) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ModifiedRowCount = &v +} + return nil +} + +func (p *TOperationHandle) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin(ctx, "TOperationHandle"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TOperationHandle) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationId: ", p), err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationId: ", p), err) } + return err +} + +func (p *TOperationHandle) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationType", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationType: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.OperationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationType (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationType: ", p), err) } + return err +} + +func (p *TOperationHandle) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "hasResultSet", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hasResultSet: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.HasResultSet)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hasResultSet (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hasResultSet: ", p), err) } + return err +} + +func (p *TOperationHandle) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetModifiedRowCount() { + if err := oprot.WriteFieldBegin(ctx, "modifiedRowCount", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:modifiedRowCount: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(*p.ModifiedRowCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.modifiedRowCount (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:modifiedRowCount: ", p), err) } + } + return err +} + +func (p *TOperationHandle) Equals(other *TOperationHandle) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationId.Equals(other.OperationId) { return false } + if p.OperationType != other.OperationType { return false } + if p.HasResultSet != other.HasResultSet { return false } + if p.ModifiedRowCount != 
other.ModifiedRowCount { + if p.ModifiedRowCount == nil || other.ModifiedRowCount == nil { + return false + } + if (*p.ModifiedRowCount) != (*other.ModifiedRowCount) { return false } + } + return true +} + +func (p *TOperationHandle) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TOperationHandle(%+v)", *p) +} + +func (p *TOperationHandle) Validate() error { + return nil +} +// Attributes: +// - ClientProtocol +// - Username +// - Password +// - Configuration +// - GetInfos +// - ClientProtocolI64 +// - ConnectionProperties +// - InitialNamespace +// - CanUseMultipleCatalogs +// - SessionId +type TOpenSessionReq struct { + ClientProtocol TProtocolVersion `thrift:"client_protocol,1" db:"client_protocol" json:"client_protocol"` + Username *string `thrift:"username,2" db:"username" json:"username,omitempty"` + Password *string `thrift:"password,3" db:"password" json:"password,omitempty"` + Configuration map[string]string `thrift:"configuration,4" db:"configuration" json:"configuration,omitempty"` + // unused fields # 5 to 1280 + GetInfos []TGetInfoType `thrift:"getInfos,1281" db:"getInfos" json:"getInfos,omitempty"` + ClientProtocolI64 *int64 `thrift:"client_protocol_i64,1282" db:"client_protocol_i64" json:"client_protocol_i64,omitempty"` + ConnectionProperties map[string]string `thrift:"connectionProperties,1283" db:"connectionProperties" json:"connectionProperties,omitempty"` + InitialNamespace *TNamespace `thrift:"initialNamespace,1284" db:"initialNamespace" json:"initialNamespace,omitempty"` + CanUseMultipleCatalogs *bool `thrift:"canUseMultipleCatalogs,1285" db:"canUseMultipleCatalogs" json:"canUseMultipleCatalogs,omitempty"` + // unused fields # 1286 to 3328 + SessionId *THandleIdentifier `thrift:"sessionId,3329" db:"sessionId" json:"sessionId,omitempty"` +} + +func NewTOpenSessionReq() *TOpenSessionReq { + return &TOpenSessionReq{ +ClientProtocol: -7, +} +} + +var TOpenSessionReq_ClientProtocol_DEFAULT TProtocolVersion = -7 + +func (p *TOpenSessionReq) GetClientProtocol() TProtocolVersion { + return p.ClientProtocol +} +var TOpenSessionReq_Username_DEFAULT string +func (p *TOpenSessionReq) GetUsername() string { + if !p.IsSetUsername() { + return TOpenSessionReq_Username_DEFAULT + } +return *p.Username +} +var TOpenSessionReq_Password_DEFAULT string +func (p *TOpenSessionReq) GetPassword() string { + if !p.IsSetPassword() { + return TOpenSessionReq_Password_DEFAULT + } +return *p.Password +} +var TOpenSessionReq_Configuration_DEFAULT map[string]string + +func (p *TOpenSessionReq) GetConfiguration() map[string]string { + return p.Configuration +} +var TOpenSessionReq_GetInfos_DEFAULT []TGetInfoType + +func (p *TOpenSessionReq) GetGetInfos() []TGetInfoType { + return p.GetInfos +} +var TOpenSessionReq_ClientProtocolI64_DEFAULT int64 +func (p *TOpenSessionReq) GetClientProtocolI64() int64 { + if !p.IsSetClientProtocolI64() { + return TOpenSessionReq_ClientProtocolI64_DEFAULT + } +return *p.ClientProtocolI64 +} +var TOpenSessionReq_ConnectionProperties_DEFAULT map[string]string + +func (p *TOpenSessionReq) GetConnectionProperties() map[string]string { + return p.ConnectionProperties +} +var TOpenSessionReq_InitialNamespace_DEFAULT *TNamespace +func (p *TOpenSessionReq) GetInitialNamespace() *TNamespace { + if !p.IsSetInitialNamespace() { + return TOpenSessionReq_InitialNamespace_DEFAULT + } +return p.InitialNamespace +} +var TOpenSessionReq_CanUseMultipleCatalogs_DEFAULT bool +func (p *TOpenSessionReq) GetCanUseMultipleCatalogs() bool { + if 
!p.IsSetCanUseMultipleCatalogs() { + return TOpenSessionReq_CanUseMultipleCatalogs_DEFAULT + } +return *p.CanUseMultipleCatalogs +} +var TOpenSessionReq_SessionId_DEFAULT *THandleIdentifier +func (p *TOpenSessionReq) GetSessionId() *THandleIdentifier { + if !p.IsSetSessionId() { + return TOpenSessionReq_SessionId_DEFAULT + } +return p.SessionId +} +func (p *TOpenSessionReq) IsSetClientProtocol() bool { + return p.ClientProtocol != TOpenSessionReq_ClientProtocol_DEFAULT +} + +func (p *TOpenSessionReq) IsSetUsername() bool { + return p.Username != nil +} + +func (p *TOpenSessionReq) IsSetPassword() bool { + return p.Password != nil +} + +func (p *TOpenSessionReq) IsSetConfiguration() bool { + return p.Configuration != nil +} + +func (p *TOpenSessionReq) IsSetGetInfos() bool { + return p.GetInfos != nil +} + +func (p *TOpenSessionReq) IsSetClientProtocolI64() bool { + return p.ClientProtocolI64 != nil +} + +func (p *TOpenSessionReq) IsSetConnectionProperties() bool { + return p.ConnectionProperties != nil +} + +func (p *TOpenSessionReq) IsSetInitialNamespace() bool { + return p.InitialNamespace != nil +} + +func (p *TOpenSessionReq) IsSetCanUseMultipleCatalogs() bool { + return p.CanUseMultipleCatalogs != nil +} + +func (p *TOpenSessionReq) IsSetSessionId() bool { + return p.SessionId != nil +} + +func (p *TOpenSessionReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.MAP { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1283: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1283(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1284: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1284(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1285: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1285(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TOpenSessionReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := TProtocolVersion(v) + p.ClientProtocol = temp +} + return nil +} + +func (p *TOpenSessionReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Username = &v +} + return nil +} + +func (p *TOpenSessionReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.Password = &v +} + return nil +} + +func (p *TOpenSessionReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Configuration = tMap + for i := 0; i < size; i ++ { +var _key62 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _key62 = v +} +var _val63 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _val63 = v +} + p.Configuration[_key62] = _val63 + } + if err := iprot.ReadMapEnd(ctx); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *TOpenSessionReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]TGetInfoType, 0, size) + p.GetInfos = tSlice + for i := 0; i < size; i ++ { +var _elem64 TGetInfoType + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + temp := TGetInfoType(v) + _elem64 = temp +} + p.GetInfos = append(p.GetInfos, _elem64) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TOpenSessionReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.ClientProtocolI64 = &v +} + return nil +} + +func (p *TOpenSessionReq) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.ConnectionProperties = tMap + for i := 0; i < size; i ++ { +var _key65 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading 
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _key65 = v
+}
+var _val66 string
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _val66 = v
+}
+ p.ConnectionProperties[_key65] = _val66
+ }
+ if err := iprot.ReadMapEnd(ctx); err != nil {
+ return thrift.PrependError("error reading map end: ", err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionReq) ReadField1284(ctx context.Context, iprot thrift.TProtocol) error {
+ p.InitialNamespace = &TNamespace{}
+ if err := p.InitialNamespace.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.InitialNamespace), err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionReq) ReadField1285(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1285: ", err)
+} else {
+ p.CanUseMultipleCatalogs = &v
+}
+ return nil
+}
+
+func (p *TOpenSessionReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+ p.SessionId = &THandleIdentifier{}
+ if err := p.SessionId.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionId), err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TOpenSessionReq"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField1281(ctx, oprot); err != nil { return err }
+ if err := p.writeField1282(ctx, oprot); err != nil { return err }
+ if err := p.writeField1283(ctx, oprot); err != nil { return err }
+ if err := p.writeField1284(ctx, oprot); err != nil { return err }
+ if err := p.writeField1285(ctx, oprot); err != nil { return err }
+ if err := p.writeField3329(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TOpenSessionReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetClientProtocol() {
+ if err := oprot.WriteFieldBegin(ctx, "client_protocol", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:client_protocol: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.ClientProtocol)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.client_protocol (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:client_protocol: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetUsername() {
+ if err := oprot.WriteFieldBegin(ctx, "username", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:username: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.Username)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.username (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:username: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetPassword() {
+ if err := oprot.WriteFieldBegin(ctx, "password", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:password: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.Password)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.password (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:password: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetConfiguration() {
+ if err := oprot.WriteFieldBegin(ctx, "configuration", thrift.MAP, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:configuration: ", p), err) }
+ if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.Configuration)); err != nil {
+ return thrift.PrependError("error writing map begin: ", err)
+ }
+ for k, v := range p.Configuration {
+ if err := oprot.WriteString(ctx, string(k)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ if err := oprot.WriteString(ctx, string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteMapEnd(ctx); err != nil {
+ return thrift.PrependError("error writing map end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:configuration: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetGetInfos() {
+ if err := oprot.WriteFieldBegin(ctx, "getInfos", thrift.LIST, 1281); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getInfos: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.I32, len(p.GetInfos)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.GetInfos {
+ if err := oprot.WriteI32(ctx, int32(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getInfos: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetClientProtocolI64() {
+ if err := oprot.WriteFieldBegin(ctx, "client_protocol_i64", thrift.I64, 1282); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:client_protocol_i64: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.ClientProtocolI64)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.client_protocol_i64 (1282) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:client_protocol_i64: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField1283(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetConnectionProperties() {
+ if err := oprot.WriteFieldBegin(ctx, "connectionProperties", thrift.MAP, 1283); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1283:connectionProperties: ", p), err) }
+ if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.ConnectionProperties)); err != nil {
+ return thrift.PrependError("error writing map begin: ", err)
+ }
+ for k, v := range p.ConnectionProperties {
+ if err := oprot.WriteString(ctx, string(k)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ if err := oprot.WriteString(ctx, string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteMapEnd(ctx); err != nil {
+ return thrift.PrependError("error writing map end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1283:connectionProperties: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField1284(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetInitialNamespace() {
+ if err := oprot.WriteFieldBegin(ctx, "initialNamespace", thrift.STRUCT, 1284); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1284:initialNamespace: ", p), err) }
+ if err := p.InitialNamespace.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.InitialNamespace), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1284:initialNamespace: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField1285(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetCanUseMultipleCatalogs() {
+ if err := oprot.WriteFieldBegin(ctx, "canUseMultipleCatalogs", thrift.BOOL, 1285); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1285:canUseMultipleCatalogs: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.CanUseMultipleCatalogs)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.canUseMultipleCatalogs (1285) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1285:canUseMultipleCatalogs: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSessionId() {
+ if err := oprot.WriteFieldBegin(ctx, "sessionId", thrift.STRUCT, 3329); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:sessionId: ", p), err) }
+ if err := p.SessionId.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionId), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:sessionId: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionReq) Equals(other *TOpenSessionReq) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.ClientProtocol != other.ClientProtocol { return false }
+ if p.Username != other.Username {
+ if p.Username == nil || other.Username == nil {
+ return false
+ }
+ if (*p.Username) != (*other.Username) { return false }
+ }
+ if p.Password != other.Password {
+ if p.Password == nil || other.Password == nil {
+ return false
+ }
+ if (*p.Password) != (*other.Password) { return false }
+ }
+ if len(p.Configuration) != len(other.Configuration) { return false }
+ for k, _tgt := range p.Configuration {
+ _src67 := other.Configuration[k]
+ if _tgt != _src67 { return false }
+ }
+ if len(p.GetInfos) != len(other.GetInfos) { return false }
+ for i, _tgt := range p.GetInfos {
+ _src68 := other.GetInfos[i]
+ if _tgt != _src68 { return false }
+ }
+ if p.ClientProtocolI64 != other.ClientProtocolI64 {
+ if p.ClientProtocolI64 == nil || other.ClientProtocolI64 == nil {
+ return false
+ }
+ if (*p.ClientProtocolI64) != (*other.ClientProtocolI64) { return false }
+ }
+ if len(p.ConnectionProperties) != len(other.ConnectionProperties) { return false }
+ for k, _tgt := range p.ConnectionProperties {
+ _src69 := other.ConnectionProperties[k]
+ if _tgt != _src69 { return false }
+ }
+ if !p.InitialNamespace.Equals(other.InitialNamespace) { return false }
+ if p.CanUseMultipleCatalogs != other.CanUseMultipleCatalogs {
+ if p.CanUseMultipleCatalogs == nil || other.CanUseMultipleCatalogs == nil {
+ return false
+ }
+ if (*p.CanUseMultipleCatalogs) != (*other.CanUseMultipleCatalogs) { return false }
+ }
+ if !p.SessionId.Equals(other.SessionId) { return false }
+ return true
+}
+
+func (p *TOpenSessionReq) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TOpenSessionReq(%+v)", *p)
+}
+
+func (p *TOpenSessionReq) Validate() error {
+ return nil
+}
+// Attributes:
+// - Status
+// - ServerProtocolVersion
+// - SessionHandle
+// - Configuration
+// - InitialNamespace
+// - CanUseMultipleCatalogs
+// - GetInfos
+type TOpenSessionResp struct {
+ Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+ ServerProtocolVersion TProtocolVersion `thrift:"serverProtocolVersion,2,required" db:"serverProtocolVersion" json:"serverProtocolVersion"`
+ SessionHandle *TSessionHandle `thrift:"sessionHandle,3" db:"sessionHandle" json:"sessionHandle,omitempty"`
+ Configuration map[string]string `thrift:"configuration,4" db:"configuration" json:"configuration,omitempty"`
+ // unused fields # 5 to 1280
+ GetInfos []*TGetInfoValue `thrift:"getInfos,1281" db:"getInfos" json:"getInfos,omitempty"`
+ // unused fields # 1282 to 1283
+ InitialNamespace *TNamespace `thrift:"initialNamespace,1284" db:"initialNamespace" json:"initialNamespace,omitempty"`
+ CanUseMultipleCatalogs *bool `thrift:"canUseMultipleCatalogs,1285" db:"canUseMultipleCatalogs" json:"canUseMultipleCatalogs,omitempty"`
+}
+
+func NewTOpenSessionResp() *TOpenSessionResp {
+ return &TOpenSessionResp{}
+}
+
+var TOpenSessionResp_Status_DEFAULT *TStatus
+func (p *TOpenSessionResp) GetStatus() *TStatus {
+ if !p.IsSetStatus() {
+ return TOpenSessionResp_Status_DEFAULT
+ }
+return p.Status
+}
+
+func (p *TOpenSessionResp) GetServerProtocolVersion() TProtocolVersion {
+ return p.ServerProtocolVersion
+}
+var TOpenSessionResp_SessionHandle_DEFAULT *TSessionHandle
+func (p *TOpenSessionResp) GetSessionHandle() *TSessionHandle {
+ if !p.IsSetSessionHandle() {
+ return TOpenSessionResp_SessionHandle_DEFAULT
+ }
+return p.SessionHandle
+}
+var TOpenSessionResp_Configuration_DEFAULT map[string]string
+
+func (p *TOpenSessionResp) GetConfiguration() map[string]string {
+ return p.Configuration
+}
+var TOpenSessionResp_InitialNamespace_DEFAULT *TNamespace
+func (p *TOpenSessionResp) GetInitialNamespace() *TNamespace {
+ if !p.IsSetInitialNamespace() {
+ return TOpenSessionResp_InitialNamespace_DEFAULT
+ }
+return p.InitialNamespace
+}
+var TOpenSessionResp_CanUseMultipleCatalogs_DEFAULT bool
+func (p *TOpenSessionResp) GetCanUseMultipleCatalogs() bool {
+ if !p.IsSetCanUseMultipleCatalogs() {
+ return TOpenSessionResp_CanUseMultipleCatalogs_DEFAULT
+ }
+return *p.CanUseMultipleCatalogs
+}
+var TOpenSessionResp_GetInfos_DEFAULT []*TGetInfoValue
+
+func (p *TOpenSessionResp) GetGetInfos() []*TGetInfoValue {
+ return p.GetInfos
+}
+func (p *TOpenSessionResp) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TOpenSessionResp) IsSetSessionHandle() bool {
+ return p.SessionHandle != nil
+}
+
+func (p *TOpenSessionResp) IsSetConfiguration() bool {
+ return p.Configuration != nil
+}
+
+func (p *TOpenSessionResp) IsSetInitialNamespace() bool {
+ return p.InitialNamespace != nil
+}
+
+func (p *TOpenSessionResp) IsSetCanUseMultipleCatalogs() bool {
+ return p.CanUseMultipleCatalogs != nil
+}
+
+func (p *TOpenSessionResp) IsSetGetInfos() bool {
+ return p.GetInfos != nil
+}
+
+func (p *TOpenSessionResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStatus bool = false;
+ var issetServerProtocolVersion bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetStatus = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetServerProtocolVersion = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.MAP {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 1284:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1284(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 1285:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1285(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 1281:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1281(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetStatus{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+ }
+ if !issetServerProtocolVersion{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServerProtocolVersion is not set"));
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Status = &TStatus{}
+ if err := p.Status.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ temp := TProtocolVersion(v)
+ p.ServerProtocolVersion = temp
+}
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ p.SessionHandle = &TSessionHandle{}
+ if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading map begin: ", err)
+ }
+ tMap := make(map[string]string, size)
+ p.Configuration = tMap
+ for i := 0; i < size; i ++ {
+var _key70 string
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _key70 = v
+}
+var _val71 string
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+} else {
+ _val71 = v
+}
+ p.Configuration[_key70] = _val71
+ }
+ if err := iprot.ReadMapEnd(ctx); err != nil {
+ return thrift.PrependError("error reading map end: ", err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField1284(ctx context.Context, iprot thrift.TProtocol) error {
+ p.InitialNamespace = &TNamespace{}
+ if err := p.InitialNamespace.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.InitialNamespace), err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField1285(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1285: ", err)
+} else {
+ p.CanUseMultipleCatalogs = &v
+}
+ return nil
+}
+
+func (p *TOpenSessionResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*TGetInfoValue, 0, size)
+ p.GetInfos = tSlice
+ for i := 0; i < size; i ++ {
+ _elem72 := &TGetInfoValue{}
+ if err := _elem72.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem72), err)
+ }
+ p.GetInfos = append(p.GetInfos, _elem72)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *TOpenSessionResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TOpenSessionResp"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField1281(ctx, oprot); err != nil { return err }
+ if err := p.writeField1284(ctx, oprot); err != nil { return err }
+ if err := p.writeField1285(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TOpenSessionResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+ if err := p.Status.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "serverProtocolVersion", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:serverProtocolVersion: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.ServerProtocolVersion)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serverProtocolVersion (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:serverProtocolVersion: ", p), err) }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSessionHandle() {
+ if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:sessionHandle: ", p), err) }
+ if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:sessionHandle: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetConfiguration() {
+ if err := oprot.WriteFieldBegin(ctx, "configuration", thrift.MAP, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:configuration: ", p), err) }
+ if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.Configuration)); err != nil {
+ return thrift.PrependError("error writing map begin: ", err)
+ }
+ for k, v := range p.Configuration {
+ if err := oprot.WriteString(ctx, string(k)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ if err := oprot.WriteString(ctx, string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+ }
+ if err := oprot.WriteMapEnd(ctx); err != nil {
+ return thrift.PrependError("error writing map end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:configuration: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetGetInfos() {
+ if err := oprot.WriteFieldBegin(ctx, "getInfos", thrift.LIST, 1281); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getInfos: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.GetInfos)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.GetInfos {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getInfos: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField1284(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetInitialNamespace() {
+ if err := oprot.WriteFieldBegin(ctx, "initialNamespace", thrift.STRUCT, 1284); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1284:initialNamespace: ", p), err) }
+ if err := p.InitialNamespace.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.InitialNamespace), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1284:initialNamespace: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionResp) writeField1285(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetCanUseMultipleCatalogs() {
+ if err := oprot.WriteFieldBegin(ctx, "canUseMultipleCatalogs", thrift.BOOL, 1285); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1285:canUseMultipleCatalogs: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.CanUseMultipleCatalogs)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.canUseMultipleCatalogs (1285) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1285:canUseMultipleCatalogs: ", p), err) }
+ }
+ return err
+}
+
+func (p *TOpenSessionResp) Equals(other *TOpenSessionResp) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.Status.Equals(other.Status) { return false }
+ if p.ServerProtocolVersion != other.ServerProtocolVersion { return false }
+ if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+ if len(p.Configuration) != len(other.Configuration) { return false }
+ for k, _tgt := range p.Configuration {
+ _src73 := other.Configuration[k]
+ if _tgt != _src73 { return false }
+ }
+ if len(p.GetInfos) != len(other.GetInfos) { return false }
+ for i, _tgt := range p.GetInfos {
+ _src74 := other.GetInfos[i]
+ if !_tgt.Equals(_src74) { return false }
+ }
+ if !p.InitialNamespace.Equals(other.InitialNamespace) { return false }
+ if p.CanUseMultipleCatalogs != other.CanUseMultipleCatalogs {
+ if p.CanUseMultipleCatalogs == nil || other.CanUseMultipleCatalogs == nil {
+ return false
+ }
+ if (*p.CanUseMultipleCatalogs) != (*other.CanUseMultipleCatalogs) { return false }
+ }
+ return true
+}
+
+func (p *TOpenSessionResp) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TOpenSessionResp(%+v)", *p)
+}
+
+func (p *TOpenSessionResp) Validate() error {
+ return nil
+}
+// Attributes:
+// - SessionHandle
+type TCloseSessionReq struct {
+ SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+}
+
+func NewTCloseSessionReq() *TCloseSessionReq {
+ return &TCloseSessionReq{}
+}
+
+var TCloseSessionReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TCloseSessionReq) GetSessionHandle() *TSessionHandle {
+ if !p.IsSetSessionHandle() {
+ return TCloseSessionReq_SessionHandle_DEFAULT
+ }
+return p.SessionHandle
+}
+func (p *TCloseSessionReq) IsSetSessionHandle() bool {
+ return p.SessionHandle != nil
+}
+
+func (p *TCloseSessionReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetSessionHandle bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetSessionHandle = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetSessionHandle{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+ }
+ return nil
+}
+
+func (p *TCloseSessionReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.SessionHandle = &TSessionHandle{}
+ if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+ }
+ return nil
+}
+
+func (p *TCloseSessionReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TCloseSessionReq"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TCloseSessionReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+ if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+ return err
+}
+
+func (p *TCloseSessionReq) Equals(other *TCloseSessionReq) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+ return true
+}
+
+func (p *TCloseSessionReq) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TCloseSessionReq(%+v)", *p)
+}
+
+func (p *TCloseSessionReq) Validate() error {
+ return nil
+}
+// Attributes:
+// - Status
+type TCloseSessionResp struct {
+ Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+}
+
+func NewTCloseSessionResp() *TCloseSessionResp {
+ return &TCloseSessionResp{}
+}
+
+var TCloseSessionResp_Status_DEFAULT *TStatus
+func (p *TCloseSessionResp) GetStatus() *TStatus {
+ if !p.IsSetStatus() {
+ return TCloseSessionResp_Status_DEFAULT
+ }
+return p.Status
+}
+func (p *TCloseSessionResp) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TCloseSessionResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStatus bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetStatus = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetStatus{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+ }
+ return nil
+}
+
+func (p *TCloseSessionResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Status = &TStatus{}
+ if err := p.Status.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+ }
+ return nil
+}
+
+func (p *TCloseSessionResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TCloseSessionResp"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TCloseSessionResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+ if err := p.Status.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+ return err
+}
+
+func (p *TCloseSessionResp) Equals(other *TCloseSessionResp) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.Status.Equals(other.Status) { return false }
+ return true
+}
+
+func (p *TCloseSessionResp) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TCloseSessionResp(%+v)", *p)
+}
+
+func (p *TCloseSessionResp) Validate() error {
+ return nil
+}
+// Attributes:
+// - StringValue
+// - SmallIntValue
+// - IntegerBitmask
+// - IntegerFlag
+// - BinaryValue
+// - LenValue
+type TGetInfoValue struct {
+ StringValue *string `thrift:"stringValue,1" db:"stringValue" json:"stringValue,omitempty"`
+ SmallIntValue *int16 `thrift:"smallIntValue,2" db:"smallIntValue" json:"smallIntValue,omitempty"`
+ IntegerBitmask *int32 `thrift:"integerBitmask,3" db:"integerBitmask" json:"integerBitmask,omitempty"`
+ IntegerFlag *int32 `thrift:"integerFlag,4" db:"integerFlag" json:"integerFlag,omitempty"`
+ BinaryValue *int32 `thrift:"binaryValue,5" db:"binaryValue" json:"binaryValue,omitempty"`
+ LenValue *int64 `thrift:"lenValue,6" db:"lenValue" json:"lenValue,omitempty"`
+}
+
+func NewTGetInfoValue() *TGetInfoValue {
+ return &TGetInfoValue{}
+}
+
+var TGetInfoValue_StringValue_DEFAULT string
+func (p *TGetInfoValue) GetStringValue() string {
+ if !p.IsSetStringValue() {
+ return TGetInfoValue_StringValue_DEFAULT
+ }
+return *p.StringValue
+}
+var TGetInfoValue_SmallIntValue_DEFAULT int16
+func (p *TGetInfoValue) GetSmallIntValue() int16 {
+ if !p.IsSetSmallIntValue() {
+ return TGetInfoValue_SmallIntValue_DEFAULT
+ }
+return *p.SmallIntValue
+}
+var TGetInfoValue_IntegerBitmask_DEFAULT int32
+func (p *TGetInfoValue) GetIntegerBitmask() int32 {
+ if !p.IsSetIntegerBitmask() {
+ return TGetInfoValue_IntegerBitmask_DEFAULT
+ }
+return *p.IntegerBitmask
+}
+var TGetInfoValue_IntegerFlag_DEFAULT int32
+func (p *TGetInfoValue) GetIntegerFlag() int32 {
+ if !p.IsSetIntegerFlag() {
+ return TGetInfoValue_IntegerFlag_DEFAULT
+ }
+return *p.IntegerFlag
+}
+var TGetInfoValue_BinaryValue_DEFAULT int32
+func (p *TGetInfoValue) GetBinaryValue() int32 {
+ if !p.IsSetBinaryValue() {
+ return TGetInfoValue_BinaryValue_DEFAULT
+ }
+return *p.BinaryValue
+}
+var TGetInfoValue_LenValue_DEFAULT int64
+func (p *TGetInfoValue) GetLenValue() int64 {
+ if !p.IsSetLenValue() {
+ return TGetInfoValue_LenValue_DEFAULT
+ }
+return *p.LenValue
+}
+func (p *TGetInfoValue) CountSetFieldsTGetInfoValue() int {
+ count := 0
+ if (p.IsSetStringValue()) {
+ count++
+ }
+ if (p.IsSetSmallIntValue()) {
+ count++
+ }
+ if (p.IsSetIntegerBitmask()) {
+ count++
+ }
+ if (p.IsSetIntegerFlag()) {
+ count++
+ }
+ if (p.IsSetBinaryValue()) {
+ count++
+ }
+ if (p.IsSetLenValue()) {
+ count++
+ }
+ return count
+
+}
+
+func (p *TGetInfoValue) IsSetStringValue() bool {
+ return p.StringValue != nil
+}
+
+func (p *TGetInfoValue) IsSetSmallIntValue() bool {
+ return p.SmallIntValue != nil
+}
+
+func (p *TGetInfoValue) IsSetIntegerBitmask() bool {
+ return p.IntegerBitmask != nil
+}
+
+func (p *TGetInfoValue) IsSetIntegerFlag() bool {
+ return p.IntegerFlag != nil
+}
+
+func (p *TGetInfoValue) IsSetBinaryValue() bool {
+ return p.BinaryValue != nil
+}
+
+func (p *TGetInfoValue) IsSetLenValue() bool {
+ return p.LenValue != nil
+}
+
+func (p *TGetInfoValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I16 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.StringValue = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.SmallIntValue = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.IntegerBitmask = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.IntegerFlag = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.BinaryValue = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+} else {
+ p.LenValue = &v
+}
+ return nil
+}
+
+func (p *TGetInfoValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if c := p.CountSetFieldsTGetInfoValue(); c != 1 {
+ return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
+ }
+ if err := oprot.WriteStructBegin(ctx, "TGetInfoValue"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TGetInfoValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetStringValue() {
+ if err := oprot.WriteFieldBegin(ctx, "stringValue", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:stringValue: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.StringValue)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.stringValue (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:stringValue: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSmallIntValue() {
+ if err := oprot.WriteFieldBegin(ctx, "smallIntValue", thrift.I16, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:smallIntValue: ", p), err) }
+ if err := oprot.WriteI16(ctx, int16(*p.SmallIntValue)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.smallIntValue (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:smallIntValue: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIntegerBitmask() {
+ if err := oprot.WriteFieldBegin(ctx, "integerBitmask", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:integerBitmask: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(*p.IntegerBitmask)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.integerBitmask (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:integerBitmask: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIntegerFlag() {
+ if err := oprot.WriteFieldBegin(ctx, "integerFlag", thrift.I32, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:integerFlag: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(*p.IntegerFlag)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.integerFlag (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:integerFlag: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetBinaryValue() {
+ if err := oprot.WriteFieldBegin(ctx, "binaryValue", thrift.I32, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:binaryValue: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(*p.BinaryValue)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.binaryValue (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:binaryValue: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetLenValue() {
+ if err := oprot.WriteFieldBegin(ctx, "lenValue", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:lenValue: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.LenValue)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.lenValue (6) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:lenValue: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoValue) Equals(other *TGetInfoValue) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.StringValue != other.StringValue {
+ if p.StringValue == nil || other.StringValue == nil {
+ return false
+ }
+ if (*p.StringValue) != (*other.StringValue) { return false }
+ }
+ if p.SmallIntValue != other.SmallIntValue {
+ if p.SmallIntValue == nil || other.SmallIntValue == nil {
+ return false
+ }
+ if (*p.SmallIntValue) != (*other.SmallIntValue) { return false }
+ }
+ if p.IntegerBitmask != other.IntegerBitmask {
+ if p.IntegerBitmask == nil || other.IntegerBitmask == nil {
+ return false
+ }
+ if (*p.IntegerBitmask) != (*other.IntegerBitmask) { return false }
+ }
+ if p.IntegerFlag != other.IntegerFlag {
+ if p.IntegerFlag == nil || other.IntegerFlag == nil {
+ return false
+ }
+ if (*p.IntegerFlag) != (*other.IntegerFlag) { return false }
+ }
+ if p.BinaryValue != other.BinaryValue {
+ if p.BinaryValue == nil || other.BinaryValue == nil {
+ return false
+ }
+ if (*p.BinaryValue) != (*other.BinaryValue) { return false }
+ }
+ if p.LenValue != other.LenValue {
+ if p.LenValue == nil || other.LenValue == nil {
+ return false
+ }
+ if (*p.LenValue) != (*other.LenValue) { return false }
+ }
+ return true
+}
+
+func (p *TGetInfoValue) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TGetInfoValue(%+v)", *p)
+}
+
+func (p *TGetInfoValue) Validate() error {
+ return nil
+}
+// Attributes:
+// - SessionHandle
+// - InfoType
+// - SessionConf
+type TGetInfoReq struct {
+ SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+ InfoType TGetInfoType `thrift:"infoType,2,required" db:"infoType" json:"infoType"`
+ // unused fields # 3 to 3328
+ SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3329" db:"sessionConf" json:"sessionConf,omitempty"`
+}
+
+func NewTGetInfoReq() *TGetInfoReq {
+ return &TGetInfoReq{}
+}
+
+var TGetInfoReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TGetInfoReq) GetSessionHandle() *TSessionHandle {
+ if !p.IsSetSessionHandle() {
+ return TGetInfoReq_SessionHandle_DEFAULT
+ }
+return p.SessionHandle
+}
+
+func (p *TGetInfoReq) GetInfoType() TGetInfoType {
+ return p.InfoType
+}
+var TGetInfoReq_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TGetInfoReq) GetSessionConf() *TDBSqlSessionConf {
+ if !p.IsSetSessionConf() {
+ return TGetInfoReq_SessionConf_DEFAULT
+ }
+return p.SessionConf
+}
+func (p *TGetInfoReq) IsSetSessionHandle() bool {
+ return p.SessionHandle != nil
+}
+
+func (p *TGetInfoReq) IsSetSessionConf() bool {
+ return p.SessionConf != nil
+}
+
+func (p *TGetInfoReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetSessionHandle bool = false;
+ var issetInfoType bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetSessionHandle = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetInfoType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3329:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3329(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetSessionHandle{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+ }
+ if !issetInfoType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field InfoType is not set"));
+ }
+ return nil
+}
+
+func (p *TGetInfoReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.SessionHandle = &TSessionHandle{}
+ if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+ }
+ return nil
+}
+
+func (p *TGetInfoReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ temp := TGetInfoType(v)
+ p.InfoType = temp
+}
+ return nil
+}
+
+func (p *TGetInfoReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+ p.SessionConf = &TDBSqlSessionConf{}
+ if err := p.SessionConf.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+ }
+ return nil
+}
+
+func (p *TGetInfoReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TGetInfoReq"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3329(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TGetInfoReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+ if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+ return err
+}
+
+func (p *TGetInfoReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "infoType", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infoType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.InfoType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.infoType (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infoType: ", p), err) }
+ return err
+}
+
+func (p *TGetInfoReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSessionConf() {
+ if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3329); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:sessionConf: ", p), err) }
+ if err := p.SessionConf.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:sessionConf: ", p), err) }
+ }
+ return err
+}
+
+func (p *TGetInfoReq) Equals(other *TGetInfoReq) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+ if p.InfoType != other.InfoType { return false }
+ if !p.SessionConf.Equals(other.SessionConf) { return false }
+ return true
+}
+
+func (p *TGetInfoReq) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TGetInfoReq(%+v)", *p)
+}
+
+func (p *TGetInfoReq) Validate() error {
+ return nil
+}
+// Attributes:
+// - Status
+// - InfoValue
+type TGetInfoResp struct {
+ Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+ InfoValue *TGetInfoValue `thrift:"infoValue,2,required" db:"infoValue" json:"infoValue"`
+}
+
+func NewTGetInfoResp() *TGetInfoResp {
+ return &TGetInfoResp{}
+}
+
+var TGetInfoResp_Status_DEFAULT *TStatus
+func (p *TGetInfoResp) GetStatus() *TStatus {
+ if !p.IsSetStatus() {
+ return TGetInfoResp_Status_DEFAULT
+ }
+return p.Status
+}
+var TGetInfoResp_InfoValue_DEFAULT *TGetInfoValue
+func (p *TGetInfoResp) GetInfoValue() *TGetInfoValue {
+ if !p.IsSetInfoValue() {
+ return TGetInfoResp_InfoValue_DEFAULT
+ }
+return p.InfoValue
+}
+func (p *TGetInfoResp) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TGetInfoResp) IsSetInfoValue() bool {
+ return p.InfoValue != nil
+}
+
+func (p *TGetInfoResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStatus bool = false;
+ var issetInfoValue bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetStatus = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetInfoValue = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetStatus{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+ }
+ if !issetInfoValue{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field InfoValue is not set"));
+ }
+ return nil
+}
+
+func (p *TGetInfoResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Status = &TStatus{}
+ if err := p.Status.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+ }
+ return nil
+}
+
+func (p *TGetInfoResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ p.InfoValue = &TGetInfoValue{}
+ if err := p.InfoValue.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.InfoValue), err)
+ }
+ return nil
+}
+
+func (p *TGetInfoResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TGetInfoResp"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TGetInfoResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+ if err := p.Status.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+ return err
+}
+
+func (p *TGetInfoResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "infoValue", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infoValue: ", p), err) }
+ if err := p.InfoValue.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.InfoValue), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
2:infoValue: ", p), err) } + return err +} + +func (p *TGetInfoResp) Equals(other *TGetInfoResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.InfoValue.Equals(other.InfoValue) { return false } + return true +} + +func (p *TGetInfoResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetInfoResp(%+v)", *p) +} + +func (p *TGetInfoResp) Validate() error { + return nil +} +// Attributes: +// - MaxRows +// - MaxBytes +type TSparkGetDirectResults struct { + MaxRows int64 `thrift:"maxRows,1,required" db:"maxRows" json:"maxRows"` + MaxBytes *int64 `thrift:"maxBytes,2" db:"maxBytes" json:"maxBytes,omitempty"` +} + +func NewTSparkGetDirectResults() *TSparkGetDirectResults { + return &TSparkGetDirectResults{} +} + + +func (p *TSparkGetDirectResults) GetMaxRows() int64 { + return p.MaxRows +} +var TSparkGetDirectResults_MaxBytes_DEFAULT int64 +func (p *TSparkGetDirectResults) GetMaxBytes() int64 { + if !p.IsSetMaxBytes() { + return TSparkGetDirectResults_MaxBytes_DEFAULT + } +return *p.MaxBytes +} +func (p *TSparkGetDirectResults) IsSetMaxBytes() bool { + return p.MaxBytes != nil +} + +func (p *TSparkGetDirectResults) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetMaxRows bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetMaxRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetMaxRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxRows is not set")); + } + return nil +} + +func (p *TSparkGetDirectResults) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.MaxRows = v +} + return nil +} + +func (p *TSparkGetDirectResults) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.MaxBytes = &v +} + return nil +} + +func (p *TSparkGetDirectResults) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TSparkGetDirectResults"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); 
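+// The next two structs implement the Spark/Databricks "direct results"
+// optimization: TSparkGetDirectResults is attached to a request to ask the
+// server to inline up to maxRows/maxBytes of results in its response, and
+// TSparkDirectResults carries back the inlined operation status, result-set
+// metadata, first result batch, and close status, which appears to save the
+// client separate status/fetch/close round trips.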
+// Attributes:
+// - MaxRows
+// - MaxBytes
+type TSparkGetDirectResults struct {
+ MaxRows int64 `thrift:"maxRows,1,required" db:"maxRows" json:"maxRows"`
+ MaxBytes *int64 `thrift:"maxBytes,2" db:"maxBytes" json:"maxBytes,omitempty"`
+}
+
+func NewTSparkGetDirectResults() *TSparkGetDirectResults {
+ return &TSparkGetDirectResults{}
+}
+
+
+func (p *TSparkGetDirectResults) GetMaxRows() int64 {
+ return p.MaxRows
+}
+var TSparkGetDirectResults_MaxBytes_DEFAULT int64
+func (p *TSparkGetDirectResults) GetMaxBytes() int64 {
+ if !p.IsSetMaxBytes() {
+ return TSparkGetDirectResults_MaxBytes_DEFAULT
+ }
+return *p.MaxBytes
+}
+func (p *TSparkGetDirectResults) IsSetMaxBytes() bool {
+ return p.MaxBytes != nil
+}
+
+func (p *TSparkGetDirectResults) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetMaxRows bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetMaxRows = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetMaxRows{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxRows is not set"));
+ }
+ return nil
+}
+
+func (p *TSparkGetDirectResults) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.MaxRows = v
+}
+ return nil
+}
+
+func (p *TSparkGetDirectResults) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.MaxBytes = &v
+}
+ return nil
+}
+
+func (p *TSparkGetDirectResults) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TSparkGetDirectResults"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TSparkGetDirectResults) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "maxRows", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxRows: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.MaxRows)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxRows (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxRows: ", p), err) }
+ return err
+}
+
+func (p *TSparkGetDirectResults) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetMaxBytes() {
+ if err := oprot.WriteFieldBegin(ctx, "maxBytes", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxBytes: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.MaxBytes)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxBytes (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxBytes: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkGetDirectResults) Equals(other *TSparkGetDirectResults) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.MaxRows != other.MaxRows { return false }
+ if p.MaxBytes != other.MaxBytes {
+ if p.MaxBytes == nil || other.MaxBytes == nil {
+ return false
+ }
+ if (*p.MaxBytes) != (*other.MaxBytes) { return false }
+ }
+ return true
+}
+
+func (p *TSparkGetDirectResults) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TSparkGetDirectResults(%+v)", *p)
+}
+
+func (p *TSparkGetDirectResults) Validate() error {
+ return nil
+}
+// Attributes:
+// - OperationStatus
+// - ResultSetMetadata
+// - ResultSet
+// - CloseOperation
+type TSparkDirectResults struct {
+ OperationStatus *TGetOperationStatusResp `thrift:"operationStatus,1" db:"operationStatus" json:"operationStatus,omitempty"`
+ ResultSetMetadata *TGetResultSetMetadataResp `thrift:"resultSetMetadata,2" db:"resultSetMetadata" json:"resultSetMetadata,omitempty"`
+ ResultSet *TFetchResultsResp `thrift:"resultSet,3" db:"resultSet" json:"resultSet,omitempty"`
+ CloseOperation *TCloseOperationResp `thrift:"closeOperation,4" db:"closeOperation" json:"closeOperation,omitempty"`
+}
+
+func NewTSparkDirectResults() *TSparkDirectResults {
+ return &TSparkDirectResults{}
+}
+
+var TSparkDirectResults_OperationStatus_DEFAULT *TGetOperationStatusResp
+func (p *TSparkDirectResults) GetOperationStatus() *TGetOperationStatusResp {
+ if !p.IsSetOperationStatus() {
+ return TSparkDirectResults_OperationStatus_DEFAULT
+ }
+return p.OperationStatus
+}
+var TSparkDirectResults_ResultSetMetadata_DEFAULT *TGetResultSetMetadataResp
+func (p *TSparkDirectResults) GetResultSetMetadata() *TGetResultSetMetadataResp {
+ if !p.IsSetResultSetMetadata() {
+ return TSparkDirectResults_ResultSetMetadata_DEFAULT
+ }
+return p.ResultSetMetadata
+}
+var TSparkDirectResults_ResultSet_DEFAULT *TFetchResultsResp
+func (p *TSparkDirectResults) GetResultSet() *TFetchResultsResp {
+ if !p.IsSetResultSet() {
+ return TSparkDirectResults_ResultSet_DEFAULT
+ }
+return p.ResultSet
+}
+var TSparkDirectResults_CloseOperation_DEFAULT *TCloseOperationResp
+func (p *TSparkDirectResults) GetCloseOperation() *TCloseOperationResp {
+ if !p.IsSetCloseOperation() {
+ return TSparkDirectResults_CloseOperation_DEFAULT
+ }
+return p.CloseOperation
+}
+func (p *TSparkDirectResults) IsSetOperationStatus() bool {
+ return p.OperationStatus != nil
+}
+
+func (p *TSparkDirectResults) IsSetResultSetMetadata() bool {
+ return p.ResultSetMetadata != nil
+}
+
+func (p *TSparkDirectResults) IsSetResultSet() bool {
+ return p.ResultSet != nil
+}
+
+func (p *TSparkDirectResults) IsSetCloseOperation() bool {
+ return p.CloseOperation != nil
+}
+
+func (p *TSparkDirectResults) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *TSparkDirectResults) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.OperationStatus = &TGetOperationStatusResp{}
+ if err := p.OperationStatus.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationStatus), err)
+ }
+ return nil
+}
+
+func (p *TSparkDirectResults) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ p.ResultSetMetadata = &TGetResultSetMetadataResp{}
+ if err := p.ResultSetMetadata.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ResultSetMetadata), err)
+ }
+ return nil
+}
+
+func (p *TSparkDirectResults) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ p.ResultSet = &TFetchResultsResp{}
+ if err := p.ResultSet.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ResultSet), err)
+ }
+ return nil
+}
+
+func (p *TSparkDirectResults) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.CloseOperation = &TCloseOperationResp{}
+ if err := p.CloseOperation.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CloseOperation), err)
+ }
+ return nil
+}
+
+func (p *TSparkDirectResults) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TSparkDirectResults"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TSparkDirectResults) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetOperationStatus() {
+ if err := oprot.WriteFieldBegin(ctx, "operationStatus", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationStatus: ", p), err) }
+ if err := p.OperationStatus.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationStatus), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationStatus: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkDirectResults) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetResultSetMetadata() {
+ if err := oprot.WriteFieldBegin(ctx, "resultSetMetadata", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:resultSetMetadata: ", p), err) }
+ if err := p.ResultSetMetadata.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ResultSetMetadata), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:resultSetMetadata: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkDirectResults) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetResultSet() {
+ if err := oprot.WriteFieldBegin(ctx, "resultSet", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:resultSet: ", p), err) }
+ if err := p.ResultSet.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ResultSet), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:resultSet: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkDirectResults) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetCloseOperation() {
+ if err := oprot.WriteFieldBegin(ctx, "closeOperation", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:closeOperation: ", p), err) }
+ if err := p.CloseOperation.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CloseOperation), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:closeOperation: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkDirectResults) Equals(other *TSparkDirectResults) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.OperationStatus.Equals(other.OperationStatus) { return false }
+ if !p.ResultSetMetadata.Equals(other.ResultSetMetadata) { return false }
+ if !p.ResultSet.Equals(other.ResultSet) { return false }
+ if !p.CloseOperation.Equals(other.CloseOperation) { return false }
+ return true
+}
+
+func (p *TSparkDirectResults) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TSparkDirectResults(%+v)", *p)
+}
+
+func (p *TSparkDirectResults) Validate() error {
+ return nil
+}
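+// TSparkArrowTypes selects which logical types the server may encode
+// natively in Arrow result batches. Every flag is optional, so each is
+// generated as a *bool: nil means "unset", the getters fall back to the
+// _DEFAULT value, and the writeFieldN methods below omit unset flags from
+// the wire instead of sending a default.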
+// Attributes:
+// - TimestampAsArrow
+// - DecimalAsArrow
+// - ComplexTypesAsArrow
+// - IntervalTypesAsArrow
+// - NullTypeAsArrow
+type TSparkArrowTypes struct {
+ TimestampAsArrow *bool `thrift:"timestampAsArrow,1" db:"timestampAsArrow" json:"timestampAsArrow,omitempty"`
+ DecimalAsArrow *bool `thrift:"decimalAsArrow,2" db:"decimalAsArrow" json:"decimalAsArrow,omitempty"`
+ ComplexTypesAsArrow *bool `thrift:"complexTypesAsArrow,3" db:"complexTypesAsArrow" json:"complexTypesAsArrow,omitempty"`
+ IntervalTypesAsArrow *bool `thrift:"intervalTypesAsArrow,4" db:"intervalTypesAsArrow" json:"intervalTypesAsArrow,omitempty"`
+ NullTypeAsArrow *bool `thrift:"nullTypeAsArrow,5" db:"nullTypeAsArrow" json:"nullTypeAsArrow,omitempty"`
+}
+
+func NewTSparkArrowTypes() *TSparkArrowTypes {
+ return &TSparkArrowTypes{}
+}
+
+var TSparkArrowTypes_TimestampAsArrow_DEFAULT bool
+func (p *TSparkArrowTypes) GetTimestampAsArrow() bool {
+ if !p.IsSetTimestampAsArrow() {
+ return TSparkArrowTypes_TimestampAsArrow_DEFAULT
+ }
+return *p.TimestampAsArrow
+}
+var TSparkArrowTypes_DecimalAsArrow_DEFAULT bool
+func (p *TSparkArrowTypes) GetDecimalAsArrow() bool {
+ if !p.IsSetDecimalAsArrow() {
+ return TSparkArrowTypes_DecimalAsArrow_DEFAULT
+ }
+return *p.DecimalAsArrow
+}
+var TSparkArrowTypes_ComplexTypesAsArrow_DEFAULT bool
+func (p *TSparkArrowTypes) GetComplexTypesAsArrow() bool {
+ if !p.IsSetComplexTypesAsArrow() {
+ return TSparkArrowTypes_ComplexTypesAsArrow_DEFAULT
+ }
+return *p.ComplexTypesAsArrow
+}
+var TSparkArrowTypes_IntervalTypesAsArrow_DEFAULT bool
+func (p *TSparkArrowTypes) GetIntervalTypesAsArrow() bool {
+ if !p.IsSetIntervalTypesAsArrow() {
+ return TSparkArrowTypes_IntervalTypesAsArrow_DEFAULT
+ }
+return *p.IntervalTypesAsArrow
+}
+var TSparkArrowTypes_NullTypeAsArrow_DEFAULT bool
+func (p *TSparkArrowTypes) GetNullTypeAsArrow() bool {
+ if !p.IsSetNullTypeAsArrow() {
+ return TSparkArrowTypes_NullTypeAsArrow_DEFAULT
+ }
+return *p.NullTypeAsArrow
+}
+func (p *TSparkArrowTypes) IsSetTimestampAsArrow() bool {
+ return p.TimestampAsArrow != nil
+}
+
+func (p *TSparkArrowTypes) IsSetDecimalAsArrow() bool {
+ return p.DecimalAsArrow != nil
+}
+
+func (p *TSparkArrowTypes) IsSetComplexTypesAsArrow() bool {
+ return p.ComplexTypesAsArrow != nil
+}
+
+func (p *TSparkArrowTypes) IsSetIntervalTypesAsArrow() bool {
+ return p.IntervalTypesAsArrow != nil
+}
+
+func (p *TSparkArrowTypes) IsSetNullTypeAsArrow() bool {
+ return p.NullTypeAsArrow != nil
+}
+
+func (p *TSparkArrowTypes) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *TSparkArrowTypes) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.TimestampAsArrow = &v
+}
+ return nil
+}
+
+func (p *TSparkArrowTypes) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.DecimalAsArrow = &v
+}
+ return nil
+}
+
+func (p *TSparkArrowTypes) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.ComplexTypesAsArrow = &v
+}
+ return nil
+}
+
+func (p *TSparkArrowTypes) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.IntervalTypesAsArrow = &v
+}
+ return nil
+}
+
+func (p *TSparkArrowTypes) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.NullTypeAsArrow = &v
+}
+ return nil
+}
+
+func (p *TSparkArrowTypes) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "TSparkArrowTypes"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *TSparkArrowTypes) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimestampAsArrow() {
+ if err := oprot.WriteFieldBegin(ctx, "timestampAsArrow", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestampAsArrow: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.TimestampAsArrow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestampAsArrow (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestampAsArrow: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkArrowTypes) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDecimalAsArrow() {
+ if err := oprot.WriteFieldBegin(ctx, "decimalAsArrow", thrift.BOOL, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:decimalAsArrow: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.DecimalAsArrow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.decimalAsArrow (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:decimalAsArrow: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkArrowTypes) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetComplexTypesAsArrow() {
+ if err := oprot.WriteFieldBegin(ctx, "complexTypesAsArrow", thrift.BOOL, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:complexTypesAsArrow: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.ComplexTypesAsArrow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.complexTypesAsArrow (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:complexTypesAsArrow: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkArrowTypes) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIntervalTypesAsArrow() {
+ if err := oprot.WriteFieldBegin(ctx, "intervalTypesAsArrow", thrift.BOOL, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:intervalTypesAsArrow: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.IntervalTypesAsArrow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.intervalTypesAsArrow (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:intervalTypesAsArrow: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkArrowTypes) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetNullTypeAsArrow() {
+ if err := oprot.WriteFieldBegin(ctx, "nullTypeAsArrow", thrift.BOOL, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:nullTypeAsArrow: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.NullTypeAsArrow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.nullTypeAsArrow (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:nullTypeAsArrow: ", p), err) }
+ }
+ return err
+}
+
+func (p *TSparkArrowTypes) Equals(other *TSparkArrowTypes) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.TimestampAsArrow != other.TimestampAsArrow {
+ if p.TimestampAsArrow == nil || other.TimestampAsArrow == nil {
+ return false
+ }
+ if (*p.TimestampAsArrow) != (*other.TimestampAsArrow) { return false }
+ }
+ if p.DecimalAsArrow != other.DecimalAsArrow {
+ if p.DecimalAsArrow == nil || other.DecimalAsArrow == nil {
+ return false
+ }
+ if (*p.DecimalAsArrow) != (*other.DecimalAsArrow) { return false }
+ }
+ if p.ComplexTypesAsArrow != other.ComplexTypesAsArrow {
+ if p.ComplexTypesAsArrow == nil || other.ComplexTypesAsArrow == nil {
+ return false
+ }
+ if (*p.ComplexTypesAsArrow) != (*other.ComplexTypesAsArrow) { return false }
+ }
+ if p.IntervalTypesAsArrow != other.IntervalTypesAsArrow {
+ if p.IntervalTypesAsArrow == nil || other.IntervalTypesAsArrow == nil {
+ return false
+ }
+ if (*p.IntervalTypesAsArrow) != (*other.IntervalTypesAsArrow) { return false }
+ }
+ if p.NullTypeAsArrow != other.NullTypeAsArrow {
+ if p.NullTypeAsArrow == nil || other.NullTypeAsArrow == nil {
+ return false
+ }
+ if (*p.NullTypeAsArrow) != (*other.NullTypeAsArrow) { return false }
+ }
+ return true
+}
+
+func (p *TSparkArrowTypes) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TSparkArrowTypes(%+v)", *p)
+}
+
+func (p *TSparkArrowTypes) Validate() error {
+ return nil
+}
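+// TExecuteStatementReq spans three field-ID ranges: 1-5 match the upstream
+// Hive CLIService request, while the 1281+ and 3329+ ranges appear to be
+// reserved for Spark and Databricks (TDBSql*) extensions respectively; the
+// "unused fields" gaps below keep the struct wire-compatible with servers
+// that only understand the lower ranges.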
StatementConf *TStatementConf `thrift:"statementConf,1296" db:"statementConf" json:"statementConf,omitempty"` + // unused fields # 1297 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` + RejectHighCostQueries *bool `thrift:"rejectHighCostQueries,3331" db:"rejectHighCostQueries" json:"rejectHighCostQueries,omitempty"` + EstimatedCost *float64 `thrift:"estimatedCost,3332" db:"estimatedCost" json:"estimatedCost,omitempty"` + ExecutionVersion *int16 `thrift:"executionVersion,3333" db:"executionVersion" json:"executionVersion,omitempty"` + RequestValidation []byte `thrift:"requestValidation,3334" db:"requestValidation" json:"requestValidation,omitempty"` + ResultPersistenceMode *TResultPersistenceMode `thrift:"resultPersistenceMode,3335" db:"resultPersistenceMode" json:"resultPersistenceMode,omitempty"` + TrimArrowBatchesToLimit *bool `thrift:"trimArrowBatchesToLimit,3336" db:"trimArrowBatchesToLimit" json:"trimArrowBatchesToLimit,omitempty"` + FetchDisposition *TDBSqlFetchDisposition `thrift:"fetchDisposition,3337" db:"fetchDisposition" json:"fetchDisposition,omitempty"` + // unused fields # 3338 to 3343 + EnforceResultPersistenceMode *bool `thrift:"enforceResultPersistenceMode,3344" db:"enforceResultPersistenceMode" json:"enforceResultPersistenceMode,omitempty"` + StatementList []*TDBSqlStatement `thrift:"statementList,3345" db:"statementList" json:"statementList,omitempty"` + PersistResultManifest *bool `thrift:"persistResultManifest,3346" db:"persistResultManifest" json:"persistResultManifest,omitempty"` + ResultRetentionSeconds *int64 `thrift:"resultRetentionSeconds,3347" db:"resultRetentionSeconds" json:"resultRetentionSeconds,omitempty"` + ResultByteLimit *int64 `thrift:"resultByteLimit,3348" db:"resultByteLimit" json:"resultByteLimit,omitempty"` + ResultDataFormat *TDBSqlResultFormat `thrift:"resultDataFormat,3349" db:"resultDataFormat" json:"resultDataFormat,omitempty"` + OriginatingClientIdentity *string `thrift:"originatingClientIdentity,3350" db:"originatingClientIdentity" json:"originatingClientIdentity,omitempty"` + PreferSingleFileResult_ *bool `thrift:"preferSingleFileResult,3351" db:"preferSingleFileResult" json:"preferSingleFileResult,omitempty"` + PreferDriverOnlyUpload *bool `thrift:"preferDriverOnlyUpload,3352" db:"preferDriverOnlyUpload" json:"preferDriverOnlyUpload,omitempty"` + EnforceEmbeddedSchemaCorrectness bool `thrift:"enforceEmbeddedSchemaCorrectness,3353" db:"enforceEmbeddedSchemaCorrectness" json:"enforceEmbeddedSchemaCorrectness"` + // unused fields # 3354 to 3359 + IdempotencyToken *string `thrift:"idempotencyToken,3360" db:"idempotencyToken" json:"idempotencyToken,omitempty"` + ThrowErrorOnByteLimitTruncation *bool `thrift:"throwErrorOnByteLimitTruncation,3361" db:"throwErrorOnByteLimitTruncation" json:"throwErrorOnByteLimitTruncation,omitempty"` +} + +func NewTExecuteStatementReq() *TExecuteStatementReq { + return &TExecuteStatementReq{} +} + +var TExecuteStatementReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TExecuteStatementReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TExecuteStatementReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} + +func (p *TExecuteStatementReq) GetStatement() string { + return p.Statement +} +var TExecuteStatementReq_ConfOverlay_DEFAULT map[string]string + +func (p *TExecuteStatementReq) GetConfOverlay() 
map[string]string { + return p.ConfOverlay +} +var TExecuteStatementReq_RunAsync_DEFAULT bool = false + +func (p *TExecuteStatementReq) GetRunAsync() bool { + return p.RunAsync +} +var TExecuteStatementReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TExecuteStatementReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TExecuteStatementReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TExecuteStatementReq_QueryTimeout_DEFAULT int64 = 0 + +func (p *TExecuteStatementReq) GetQueryTimeout() int64 { + return p.QueryTimeout +} +var TExecuteStatementReq_CanReadArrowResult__DEFAULT bool +func (p *TExecuteStatementReq) GetCanReadArrowResult_() bool { + if !p.IsSetCanReadArrowResult_() { + return TExecuteStatementReq_CanReadArrowResult__DEFAULT + } +return *p.CanReadArrowResult_ +} +var TExecuteStatementReq_CanDownloadResult__DEFAULT bool +func (p *TExecuteStatementReq) GetCanDownloadResult_() bool { + if !p.IsSetCanDownloadResult_() { + return TExecuteStatementReq_CanDownloadResult__DEFAULT + } +return *p.CanDownloadResult_ +} +var TExecuteStatementReq_CanDecompressLZ4Result__DEFAULT bool +func (p *TExecuteStatementReq) GetCanDecompressLZ4Result_() bool { + if !p.IsSetCanDecompressLZ4Result_() { + return TExecuteStatementReq_CanDecompressLZ4Result__DEFAULT + } +return *p.CanDecompressLZ4Result_ +} +var TExecuteStatementReq_MaxBytesPerFile_DEFAULT int64 +func (p *TExecuteStatementReq) GetMaxBytesPerFile() int64 { + if !p.IsSetMaxBytesPerFile() { + return TExecuteStatementReq_MaxBytesPerFile_DEFAULT + } +return *p.MaxBytesPerFile +} +var TExecuteStatementReq_UseArrowNativeTypes_DEFAULT *TSparkArrowTypes +func (p *TExecuteStatementReq) GetUseArrowNativeTypes() *TSparkArrowTypes { + if !p.IsSetUseArrowNativeTypes() { + return TExecuteStatementReq_UseArrowNativeTypes_DEFAULT + } +return p.UseArrowNativeTypes +} +var TExecuteStatementReq_ResultRowLimit_DEFAULT int64 +func (p *TExecuteStatementReq) GetResultRowLimit() int64 { + if !p.IsSetResultRowLimit() { + return TExecuteStatementReq_ResultRowLimit_DEFAULT + } +return *p.ResultRowLimit +} +var TExecuteStatementReq_Parameters_DEFAULT []*TSparkParameter + +func (p *TExecuteStatementReq) GetParameters() []*TSparkParameter { + return p.Parameters +} +var TExecuteStatementReq_MaxBytesPerBatch_DEFAULT int64 +func (p *TExecuteStatementReq) GetMaxBytesPerBatch() int64 { + if !p.IsSetMaxBytesPerBatch() { + return TExecuteStatementReq_MaxBytesPerBatch_DEFAULT + } +return *p.MaxBytesPerBatch +} +var TExecuteStatementReq_StatementConf_DEFAULT *TStatementConf +func (p *TExecuteStatementReq) GetStatementConf() *TStatementConf { + if !p.IsSetStatementConf() { + return TExecuteStatementReq_StatementConf_DEFAULT + } +return p.StatementConf +} +var TExecuteStatementReq_OperationId_DEFAULT *THandleIdentifier +func (p *TExecuteStatementReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TExecuteStatementReq_OperationId_DEFAULT + } +return p.OperationId +} +var TExecuteStatementReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TExecuteStatementReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TExecuteStatementReq_SessionConf_DEFAULT + } +return p.SessionConf +} +var TExecuteStatementReq_RejectHighCostQueries_DEFAULT bool +func (p *TExecuteStatementReq) GetRejectHighCostQueries() bool { + if !p.IsSetRejectHighCostQueries() { + return TExecuteStatementReq_RejectHighCostQueries_DEFAULT + } +return *p.RejectHighCostQueries +} +var 
TExecuteStatementReq_EstimatedCost_DEFAULT float64 +func (p *TExecuteStatementReq) GetEstimatedCost() float64 { + if !p.IsSetEstimatedCost() { + return TExecuteStatementReq_EstimatedCost_DEFAULT + } +return *p.EstimatedCost +} +var TExecuteStatementReq_ExecutionVersion_DEFAULT int16 +func (p *TExecuteStatementReq) GetExecutionVersion() int16 { + if !p.IsSetExecutionVersion() { + return TExecuteStatementReq_ExecutionVersion_DEFAULT + } +return *p.ExecutionVersion +} +var TExecuteStatementReq_RequestValidation_DEFAULT []byte + +func (p *TExecuteStatementReq) GetRequestValidation() []byte { + return p.RequestValidation +} +var TExecuteStatementReq_ResultPersistenceMode_DEFAULT TResultPersistenceMode +func (p *TExecuteStatementReq) GetResultPersistenceMode() TResultPersistenceMode { + if !p.IsSetResultPersistenceMode() { + return TExecuteStatementReq_ResultPersistenceMode_DEFAULT + } +return *p.ResultPersistenceMode +} +var TExecuteStatementReq_TrimArrowBatchesToLimit_DEFAULT bool +func (p *TExecuteStatementReq) GetTrimArrowBatchesToLimit() bool { + if !p.IsSetTrimArrowBatchesToLimit() { + return TExecuteStatementReq_TrimArrowBatchesToLimit_DEFAULT + } +return *p.TrimArrowBatchesToLimit +} +var TExecuteStatementReq_FetchDisposition_DEFAULT TDBSqlFetchDisposition +func (p *TExecuteStatementReq) GetFetchDisposition() TDBSqlFetchDisposition { + if !p.IsSetFetchDisposition() { + return TExecuteStatementReq_FetchDisposition_DEFAULT + } +return *p.FetchDisposition +} +var TExecuteStatementReq_EnforceResultPersistenceMode_DEFAULT bool +func (p *TExecuteStatementReq) GetEnforceResultPersistenceMode() bool { + if !p.IsSetEnforceResultPersistenceMode() { + return TExecuteStatementReq_EnforceResultPersistenceMode_DEFAULT + } +return *p.EnforceResultPersistenceMode +} +var TExecuteStatementReq_StatementList_DEFAULT []*TDBSqlStatement + +func (p *TExecuteStatementReq) GetStatementList() []*TDBSqlStatement { + return p.StatementList +} +var TExecuteStatementReq_PersistResultManifest_DEFAULT bool +func (p *TExecuteStatementReq) GetPersistResultManifest() bool { + if !p.IsSetPersistResultManifest() { + return TExecuteStatementReq_PersistResultManifest_DEFAULT + } +return *p.PersistResultManifest +} +var TExecuteStatementReq_ResultRetentionSeconds_DEFAULT int64 +func (p *TExecuteStatementReq) GetResultRetentionSeconds() int64 { + if !p.IsSetResultRetentionSeconds() { + return TExecuteStatementReq_ResultRetentionSeconds_DEFAULT + } +return *p.ResultRetentionSeconds +} +var TExecuteStatementReq_ResultByteLimit_DEFAULT int64 +func (p *TExecuteStatementReq) GetResultByteLimit() int64 { + if !p.IsSetResultByteLimit() { + return TExecuteStatementReq_ResultByteLimit_DEFAULT + } +return *p.ResultByteLimit +} +var TExecuteStatementReq_ResultDataFormat_DEFAULT *TDBSqlResultFormat +func (p *TExecuteStatementReq) GetResultDataFormat() *TDBSqlResultFormat { + if !p.IsSetResultDataFormat() { + return TExecuteStatementReq_ResultDataFormat_DEFAULT + } +return p.ResultDataFormat +} +var TExecuteStatementReq_OriginatingClientIdentity_DEFAULT string +func (p *TExecuteStatementReq) GetOriginatingClientIdentity() string { + if !p.IsSetOriginatingClientIdentity() { + return TExecuteStatementReq_OriginatingClientIdentity_DEFAULT + } +return *p.OriginatingClientIdentity +} +var TExecuteStatementReq_PreferSingleFileResult__DEFAULT bool +func (p *TExecuteStatementReq) GetPreferSingleFileResult_() bool { + if !p.IsSetPreferSingleFileResult_() { + return TExecuteStatementReq_PreferSingleFileResult__DEFAULT + } +return 
*p.PreferSingleFileResult_ +} +var TExecuteStatementReq_PreferDriverOnlyUpload_DEFAULT bool +func (p *TExecuteStatementReq) GetPreferDriverOnlyUpload() bool { + if !p.IsSetPreferDriverOnlyUpload() { + return TExecuteStatementReq_PreferDriverOnlyUpload_DEFAULT + } +return *p.PreferDriverOnlyUpload +} +var TExecuteStatementReq_EnforceEmbeddedSchemaCorrectness_DEFAULT bool = false + +func (p *TExecuteStatementReq) GetEnforceEmbeddedSchemaCorrectness() bool { + return p.EnforceEmbeddedSchemaCorrectness +} +var TExecuteStatementReq_IdempotencyToken_DEFAULT string +func (p *TExecuteStatementReq) GetIdempotencyToken() string { + if !p.IsSetIdempotencyToken() { + return TExecuteStatementReq_IdempotencyToken_DEFAULT + } +return *p.IdempotencyToken +} +var TExecuteStatementReq_ThrowErrorOnByteLimitTruncation_DEFAULT bool +func (p *TExecuteStatementReq) GetThrowErrorOnByteLimitTruncation() bool { + if !p.IsSetThrowErrorOnByteLimitTruncation() { + return TExecuteStatementReq_ThrowErrorOnByteLimitTruncation_DEFAULT + } +return *p.ThrowErrorOnByteLimitTruncation +} +func (p *TExecuteStatementReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TExecuteStatementReq) IsSetConfOverlay() bool { + return p.ConfOverlay != nil +} + +func (p *TExecuteStatementReq) IsSetRunAsync() bool { + return p.RunAsync != TExecuteStatementReq_RunAsync_DEFAULT +} + +func (p *TExecuteStatementReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TExecuteStatementReq) IsSetQueryTimeout() bool { + return p.QueryTimeout != TExecuteStatementReq_QueryTimeout_DEFAULT +} + +func (p *TExecuteStatementReq) IsSetCanReadArrowResult_() bool { + return p.CanReadArrowResult_ != nil +} + +func (p *TExecuteStatementReq) IsSetCanDownloadResult_() bool { + return p.CanDownloadResult_ != nil +} + +func (p *TExecuteStatementReq) IsSetCanDecompressLZ4Result_() bool { + return p.CanDecompressLZ4Result_ != nil +} + +func (p *TExecuteStatementReq) IsSetMaxBytesPerFile() bool { + return p.MaxBytesPerFile != nil +} + +func (p *TExecuteStatementReq) IsSetUseArrowNativeTypes() bool { + return p.UseArrowNativeTypes != nil +} + +func (p *TExecuteStatementReq) IsSetResultRowLimit() bool { + return p.ResultRowLimit != nil +} + +func (p *TExecuteStatementReq) IsSetParameters() bool { + return p.Parameters != nil +} + +func (p *TExecuteStatementReq) IsSetMaxBytesPerBatch() bool { + return p.MaxBytesPerBatch != nil +} + +func (p *TExecuteStatementReq) IsSetStatementConf() bool { + return p.StatementConf != nil +} + +func (p *TExecuteStatementReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TExecuteStatementReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TExecuteStatementReq) IsSetRejectHighCostQueries() bool { + return p.RejectHighCostQueries != nil +} + +func (p *TExecuteStatementReq) IsSetEstimatedCost() bool { + return p.EstimatedCost != nil +} + +func (p *TExecuteStatementReq) IsSetExecutionVersion() bool { + return p.ExecutionVersion != nil +} + +func (p *TExecuteStatementReq) IsSetRequestValidation() bool { + return p.RequestValidation != nil +} + +func (p *TExecuteStatementReq) IsSetResultPersistenceMode() bool { + return p.ResultPersistenceMode != nil +} + +func (p *TExecuteStatementReq) IsSetTrimArrowBatchesToLimit() bool { + return p.TrimArrowBatchesToLimit != nil +} + +func (p *TExecuteStatementReq) IsSetFetchDisposition() bool { + return p.FetchDisposition != nil +} + +func (p *TExecuteStatementReq) IsSetEnforceResultPersistenceMode() 
bool { + return p.EnforceResultPersistenceMode != nil +} + +func (p *TExecuteStatementReq) IsSetStatementList() bool { + return p.StatementList != nil +} + +func (p *TExecuteStatementReq) IsSetPersistResultManifest() bool { + return p.PersistResultManifest != nil +} + +func (p *TExecuteStatementReq) IsSetResultRetentionSeconds() bool { + return p.ResultRetentionSeconds != nil +} + +func (p *TExecuteStatementReq) IsSetResultByteLimit() bool { + return p.ResultByteLimit != nil +} + +func (p *TExecuteStatementReq) IsSetResultDataFormat() bool { + return p.ResultDataFormat != nil +} + +func (p *TExecuteStatementReq) IsSetOriginatingClientIdentity() bool { + return p.OriginatingClientIdentity != nil +} + +func (p *TExecuteStatementReq) IsSetPreferSingleFileResult_() bool { + return p.PreferSingleFileResult_ != nil +} + +func (p *TExecuteStatementReq) IsSetPreferDriverOnlyUpload() bool { + return p.PreferDriverOnlyUpload != nil +} + +func (p *TExecuteStatementReq) IsSetEnforceEmbeddedSchemaCorrectness() bool { + return p.EnforceEmbeddedSchemaCorrectness != TExecuteStatementReq_EnforceEmbeddedSchemaCorrectness_DEFAULT +} + +func (p *TExecuteStatementReq) IsSetIdempotencyToken() bool { + return p.IdempotencyToken != nil +} + +func (p *TExecuteStatementReq) IsSetThrowErrorOnByteLimitTruncation() bool { + return p.ThrowErrorOnByteLimitTruncation != nil +} + +func (p *TExecuteStatementReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + var issetStatement bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetStatement = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.MAP { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1283: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1283(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + 
return err + } + } + case 1284: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1284(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1285: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1285(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1286: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1286(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1287: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1287(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1288: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1288(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1289: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1289(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1296: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1296(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3331: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3331(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3332: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField3332(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3333: + if fieldTypeId == thrift.I16 { + if err := p.ReadField3333(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3334: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3334(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3335: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3335(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3336: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3336(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3337: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3337(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3344: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3344(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3345: + if fieldTypeId == thrift.LIST 
{ + if err := p.ReadField3345(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3346: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3346(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3347: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3347(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3348: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3348(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3349: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3349(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3350: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3350(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3351: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3351(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3352: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3352(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3353: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3353(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3360: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3360(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3361: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3361(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + if !issetStatement{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Statement is not set")); + } + return nil +} + +func (p *TExecuteStatementReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TExecuteStatementReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Statement = v +} + return nil +} + +func (p *TExecuteStatementReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin(ctx) + if err != nil { + 
return thrift.PrependError("error reading map begin: ", err)
+  }
+  tMap := make(map[string]string, size)
+  p.ConfOverlay = tMap
+  for i := 0; i < size; i ++ {
+var _key75 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _key75 = v
+}
+var _val76 string
+    if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 0: ", err)
+} else {
+    _val76 = v
+}
+    p.ConfOverlay[_key75] = _val76
+  }
+  if err := iprot.ReadMapEnd(ctx); err != nil {
+    return thrift.PrependError("error reading map end: ", err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.RunAsync = v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.GetDirectResults = &TSparkGetDirectResults{}
+  if err := p.GetDirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.QueryTimeout = v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1282: ", err)
+} else {
+  p.CanReadArrowResult_ = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1283: ", err)
+} else {
+  p.CanDownloadResult_ = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1284(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1284: ", err)
+} else {
+  p.CanDecompressLZ4Result_ = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1285(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1285: ", err)
+} else {
+  p.MaxBytesPerFile = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1286(ctx context.Context, iprot thrift.TProtocol) error {
+  p.UseArrowNativeTypes = &TSparkArrowTypes{}
+  if err := p.UseArrowNativeTypes.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UseArrowNativeTypes), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1287(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1287: ", err)
+} else {
+  p.ResultRowLimit = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1288(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TSparkParameter, 0, size)
+  p.Parameters = tSlice
+  for i := 0; i < size; i ++ {
+    _elem77 := &TSparkParameter{}
+    if err := _elem77.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem77), err)
+    }
+    p.Parameters = append(p.Parameters, _elem77)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1289(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1289: ", err)
+} else {
+  p.MaxBytesPerBatch = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField1296(ctx context.Context, iprot thrift.TProtocol) error {
+  p.StatementConf = &TStatementConf{}
+  if err := p.StatementConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StatementConf), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationId = &THandleIdentifier{}
+  if err := p.OperationId.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3331(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3331: ", err)
+} else {
+  p.RejectHighCostQueries = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3332(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 3332: ", err)
+} else {
+  p.EstimatedCost = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3333(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+  return thrift.PrependError("error reading field 3333: ", err)
+} else {
+  p.ExecutionVersion = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3334(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 3334: ", err)
+} else {
+  p.RequestValidation = v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3335(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 3335: ", err)
+} else {
+  temp := TResultPersistenceMode(v)
+  p.ResultPersistenceMode = &temp
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3336(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3336: ", err)
+} else {
+  p.TrimArrowBatchesToLimit = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3337(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 3337: ", err)
+} else {
+  temp := TDBSqlFetchDisposition(v)
+  p.FetchDisposition = &temp
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3344(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3344: ", err)
+} else {
+  p.EnforceResultPersistenceMode = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3345(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TDBSqlStatement, 0, size)
+  p.StatementList = tSlice
+  for i := 0; i < size; i ++ {
+    _elem78 := &TDBSqlStatement{}
+    if err := _elem78.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem78), err)
+    }
+    p.StatementList = append(p.StatementList, _elem78)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3346(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3346: ", err)
+} else {
+  p.PersistResultManifest = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3347(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3347: ", err)
+} else {
+  p.ResultRetentionSeconds = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3348(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3348: ", err)
+} else {
+  p.ResultByteLimit = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3349(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ResultDataFormat = &TDBSqlResultFormat{}
+  if err := p.ResultDataFormat.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ResultDataFormat), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3350(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3350: ", err)
+} else {
+  p.OriginatingClientIdentity = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3351(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3351: ", err)
+} else {
+  p.PreferSingleFileResult_ = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3352(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3352: ", err)
+} else {
+  p.PreferDriverOnlyUpload = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3353(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3353: ", err)
+} else {
+  p.EnforceEmbeddedSchemaCorrectness = v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3360(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3360: ", err)
+} else {
+  p.IdempotencyToken = &v
+}
+  return nil
+}
+
+func (p *TExecuteStatementReq) ReadField3361(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3361: ", err)
+} else {
+  p.ThrowErrorOnByteLimitTruncation = &v
+}
+  return nil
+}
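+
+// NOTE (editorial comment, not emitted by the Thrift compiler): the field IDs
+// above fall into three apparent ranges: 1-5 are the standard HiveServer2
+// TExecuteStatementReq fields, the 128x range carries Spark/Databricks driver
+// extensions (direct results, Arrow capability flags, query parameters), and
+// the 33xx range is used internally by Databricks SQL. Read() skips unknown
+// IDs, which is what keeps older readers compatible with newer writers. A
+// minimal, illustrative sketch of populating the request (sessionHandle is
+// assumed to come from a prior TOpenSessionResp; NewTExecuteStatementReq is
+// the constructor generated earlier in this file):
+//
+//   req := NewTExecuteStatementReq()
+//   req.SessionHandle = sessionHandle
+//   req.Statement = "SELECT 1"
+//   req.RunAsync = true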
+
+func (p *TExecuteStatementReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TExecuteStatementReq"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField1283(ctx, oprot); err != nil { return err }
+    if err := p.writeField1284(ctx, oprot); err != nil { return err }
+    if err := p.writeField1285(ctx, oprot); err != nil { return err }
+    if err := p.writeField1286(ctx, oprot); err != nil { return err }
+    if err := p.writeField1287(ctx, oprot); err != nil { return err }
+    if err := p.writeField1288(ctx, oprot); err != nil { return err }
+    if err := p.writeField1289(ctx, oprot); err != nil { return err }
+    if err := p.writeField1296(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+    if err := p.writeField3331(ctx, oprot); err != nil { return err }
+    if err := p.writeField3332(ctx, oprot); err != nil { return err }
+    if err := p.writeField3333(ctx, oprot); err != nil { return err }
+    if err := p.writeField3334(ctx, oprot); err != nil { return err }
+    if err := p.writeField3335(ctx, oprot); err != nil { return err }
+    if err := p.writeField3336(ctx, oprot); err != nil { return err }
+    if err := p.writeField3337(ctx, oprot); err != nil { return err }
+    if err := p.writeField3344(ctx, oprot); err != nil { return err }
+    if err := p.writeField3345(ctx, oprot); err != nil { return err }
+    if err := p.writeField3346(ctx, oprot); err != nil { return err }
+    if err := p.writeField3347(ctx, oprot); err != nil { return err }
+    if err := p.writeField3348(ctx, oprot); err != nil { return err }
+    if err := p.writeField3349(ctx, oprot); err != nil { return err }
+    if err := p.writeField3350(ctx, oprot); err != nil { return err }
+    if err := p.writeField3351(ctx, oprot); err != nil { return err }
+    if err := p.writeField3352(ctx, oprot); err != nil { return err }
+    if err := p.writeField3353(ctx, oprot); err != nil { return err }
+    if err := p.writeField3360(ctx, oprot); err != nil { return err }
+    if err := p.writeField3361(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TExecuteStatementReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+  if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "statement", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:statement: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Statement)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.statement (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:statement: ", p), err) }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetConfOverlay() {
+    if err := oprot.WriteFieldBegin(ctx, "confOverlay", thrift.MAP, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:confOverlay: ", p), err) }
+    if err := oprot.WriteMapBegin(ctx, thrift.STRING, thrift.STRING, len(p.ConfOverlay)); err != nil {
+      return thrift.PrependError("error writing map begin: ", err)
+    }
+    for k, v := range p.ConfOverlay {
+      if err := oprot.WriteString(ctx, string(k)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+      if err := oprot.WriteString(ctx, string(v)); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
+    }
+    if err := oprot.WriteMapEnd(ctx); err != nil {
+      return thrift.PrependError("error writing map end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:confOverlay: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRunAsync() {
+    if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:runAsync: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.runAsync (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:runAsync: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetQueryTimeout() {
+    if err := oprot.WriteFieldBegin(ctx, "queryTimeout", thrift.I64, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:queryTimeout: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(p.QueryTimeout)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.queryTimeout (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:queryTimeout: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetGetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) }
+    if err := p.GetDirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCanReadArrowResult_() {
+    if err := oprot.WriteFieldBegin(ctx, "canReadArrowResult", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:canReadArrowResult: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.CanReadArrowResult_)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.canReadArrowResult (1282) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:canReadArrowResult: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1283(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCanDownloadResult_() {
+    if err := oprot.WriteFieldBegin(ctx, "canDownloadResult", thrift.BOOL, 1283); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1283:canDownloadResult: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.CanDownloadResult_)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.canDownloadResult (1283) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1283:canDownloadResult: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1284(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCanDecompressLZ4Result_() {
+    if err := oprot.WriteFieldBegin(ctx, "canDecompressLZ4Result", thrift.BOOL, 1284); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1284:canDecompressLZ4Result: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.CanDecompressLZ4Result_)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.canDecompressLZ4Result (1284) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1284:canDecompressLZ4Result: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1285(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetMaxBytesPerFile() {
+    if err := oprot.WriteFieldBegin(ctx, "maxBytesPerFile", thrift.I64, 1285); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1285:maxBytesPerFile: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.MaxBytesPerFile)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.maxBytesPerFile (1285) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1285:maxBytesPerFile: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1286(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetUseArrowNativeTypes() {
+    if err := oprot.WriteFieldBegin(ctx, "useArrowNativeTypes", thrift.STRUCT, 1286); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1286:useArrowNativeTypes: ", p), err) }
+    if err := p.UseArrowNativeTypes.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UseArrowNativeTypes), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1286:useArrowNativeTypes: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1287(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultRowLimit() {
+    if err := oprot.WriteFieldBegin(ctx, "resultRowLimit", thrift.I64, 1287); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1287:resultRowLimit: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ResultRowLimit)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.resultRowLimit (1287) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1287:resultRowLimit: ", p), err) }
+  }
+  return err
+}
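+
+// NOTE (editorial): Thrift collections are length-prefixed. writeField1288
+// below emits WriteListBegin with the element type and len(p.Parameters)
+// before the elements and closes with WriteListEnd; ReadField1288 mirrors
+// this by sizing its slice from ReadListBegin. The confOverlay map in
+// writeField3 follows the same begin/end bracketing with WriteMapBegin and
+// WriteMapEnd.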
+
+func (p *TExecuteStatementReq) writeField1288(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetParameters() {
+    if err := oprot.WriteFieldBegin(ctx, "parameters", thrift.LIST, 1288); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1288:parameters: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Parameters)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Parameters {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1288:parameters: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1289(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetMaxBytesPerBatch() {
+    if err := oprot.WriteFieldBegin(ctx, "maxBytesPerBatch", thrift.I64, 1289); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1289:maxBytesPerBatch: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.MaxBytesPerBatch)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.maxBytesPerBatch (1289) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1289:maxBytesPerBatch: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField1296(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStatementConf() {
+    if err := oprot.WriteFieldBegin(ctx, "statementConf", thrift.STRUCT, 1296); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1296:statementConf: ", p), err) }
+    if err := p.StatementConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StatementConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1296:statementConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationId() {
+    if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) }
+    if err := p.OperationId.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3331(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRejectHighCostQueries() {
+    if err := oprot.WriteFieldBegin(ctx, "rejectHighCostQueries", thrift.BOOL, 3331); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3331:rejectHighCostQueries: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.RejectHighCostQueries)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.rejectHighCostQueries (3331) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3331:rejectHighCostQueries: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3332(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetEstimatedCost() {
+    if err := oprot.WriteFieldBegin(ctx, "estimatedCost", thrift.DOUBLE, 3332); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3332:estimatedCost: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.EstimatedCost)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.estimatedCost (3332) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3332:estimatedCost: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3333(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetExecutionVersion() {
+    if err := oprot.WriteFieldBegin(ctx, "executionVersion", thrift.I16, 3333); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3333:executionVersion: ", p), err) }
+    if err := oprot.WriteI16(ctx, int16(*p.ExecutionVersion)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.executionVersion (3333) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3333:executionVersion: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3334(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRequestValidation() {
+    if err := oprot.WriteFieldBegin(ctx, "requestValidation", thrift.STRING, 3334); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3334:requestValidation: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.RequestValidation); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.requestValidation (3334) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3334:requestValidation: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3335(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultPersistenceMode() {
+    if err := oprot.WriteFieldBegin(ctx, "resultPersistenceMode", thrift.I32, 3335); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3335:resultPersistenceMode: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.ResultPersistenceMode)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.resultPersistenceMode (3335) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3335:resultPersistenceMode: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3336(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTrimArrowBatchesToLimit() {
+    if err := oprot.WriteFieldBegin(ctx, "trimArrowBatchesToLimit", thrift.BOOL, 3336); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3336:trimArrowBatchesToLimit: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.TrimArrowBatchesToLimit)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.trimArrowBatchesToLimit (3336) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3336:trimArrowBatchesToLimit: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3337(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetFetchDisposition() {
+    if err := oprot.WriteFieldBegin(ctx, "fetchDisposition", thrift.I32, 3337); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3337:fetchDisposition: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.FetchDisposition)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.fetchDisposition (3337) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3337:fetchDisposition: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3344(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetEnforceResultPersistenceMode() {
+    if err := oprot.WriteFieldBegin(ctx, "enforceResultPersistenceMode", thrift.BOOL, 3344); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3344:enforceResultPersistenceMode: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.EnforceResultPersistenceMode)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.enforceResultPersistenceMode (3344) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3344:enforceResultPersistenceMode: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3345(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStatementList() {
+    if err := oprot.WriteFieldBegin(ctx, "statementList", thrift.LIST, 3345); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3345:statementList: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.StatementList)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.StatementList {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3345:statementList: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3346(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetPersistResultManifest() {
+    if err := oprot.WriteFieldBegin(ctx, "persistResultManifest", thrift.BOOL, 3346); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3346:persistResultManifest: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.PersistResultManifest)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.persistResultManifest (3346) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3346:persistResultManifest: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3347(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultRetentionSeconds() {
+    if err := oprot.WriteFieldBegin(ctx, "resultRetentionSeconds", thrift.I64, 3347); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3347:resultRetentionSeconds: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ResultRetentionSeconds)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.resultRetentionSeconds (3347) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3347:resultRetentionSeconds: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3348(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultByteLimit() {
+    if err := oprot.WriteFieldBegin(ctx, "resultByteLimit", thrift.I64, 3348); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3348:resultByteLimit: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ResultByteLimit)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.resultByteLimit (3348) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3348:resultByteLimit: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3349(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultDataFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "resultDataFormat", thrift.STRUCT, 3349); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3349:resultDataFormat: ", p), err) }
+    if err := p.ResultDataFormat.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ResultDataFormat), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3349:resultDataFormat: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3350(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOriginatingClientIdentity() {
+    if err := oprot.WriteFieldBegin(ctx, "originatingClientIdentity", thrift.STRING, 3350); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3350:originatingClientIdentity: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.OriginatingClientIdentity)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.originatingClientIdentity (3350) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3350:originatingClientIdentity: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3351(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetPreferSingleFileResult_() {
+    if err := oprot.WriteFieldBegin(ctx, "preferSingleFileResult", thrift.BOOL, 3351); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3351:preferSingleFileResult: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.PreferSingleFileResult_)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.preferSingleFileResult (3351) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3351:preferSingleFileResult: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3352(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetPreferDriverOnlyUpload() {
+    if err := oprot.WriteFieldBegin(ctx, "preferDriverOnlyUpload", thrift.BOOL, 3352); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3352:preferDriverOnlyUpload: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.PreferDriverOnlyUpload)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.preferDriverOnlyUpload (3352) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3352:preferDriverOnlyUpload: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3353(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetEnforceEmbeddedSchemaCorrectness() {
+    if err := oprot.WriteFieldBegin(ctx, "enforceEmbeddedSchemaCorrectness", thrift.BOOL, 3353); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3353:enforceEmbeddedSchemaCorrectness: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.EnforceEmbeddedSchemaCorrectness)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.enforceEmbeddedSchemaCorrectness (3353) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3353:enforceEmbeddedSchemaCorrectness: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3360(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIdempotencyToken() {
+    if err := oprot.WriteFieldBegin(ctx, "idempotencyToken", thrift.STRING, 3360); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3360:idempotencyToken: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.IdempotencyToken)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.idempotencyToken (3360) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3360:idempotencyToken: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementReq) writeField3361(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetThrowErrorOnByteLimitTruncation() {
+    if err := oprot.WriteFieldBegin(ctx, "throwErrorOnByteLimitTruncation", thrift.BOOL, 3361); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3361:throwErrorOnByteLimitTruncation: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.ThrowErrorOnByteLimitTruncation)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.throwErrorOnByteLimitTruncation (3361) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3361:throwErrorOnByteLimitTruncation: ", p), err) }
+  }
+  return err
+}
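+
+// NOTE (editorial): every optional field above is guarded by its IsSetXxx()
+// accessor, so unset (nil) fields are simply omitted from the wire; only the
+// required sessionHandle and statement fields are written unconditionally.
+// Equals below follows the matching convention for optional fields: pointers
+// are compared by nil-ness first, then dereferenced and compared by value.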
+
+func (p *TExecuteStatementReq) Equals(other *TExecuteStatementReq) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+  if p.Statement != other.Statement { return false }
+  if len(p.ConfOverlay) != len(other.ConfOverlay) { return false }
+  for k, _tgt := range p.ConfOverlay {
+    _src79 := other.ConfOverlay[k]
+    if _tgt != _src79 { return false }
+  }
+  if p.RunAsync != other.RunAsync { return false }
+  if p.QueryTimeout != other.QueryTimeout { return false }
+  if !p.GetDirectResults.Equals(other.GetDirectResults) { return false }
+  if p.CanReadArrowResult_ != other.CanReadArrowResult_ {
+    if p.CanReadArrowResult_ == nil || other.CanReadArrowResult_ == nil {
+      return false
+    }
+    if (*p.CanReadArrowResult_) != (*other.CanReadArrowResult_) { return false }
+  }
+  if p.CanDownloadResult_ != other.CanDownloadResult_ {
+    if p.CanDownloadResult_ == nil || other.CanDownloadResult_ == nil {
+      return false
+    }
+    if (*p.CanDownloadResult_) != (*other.CanDownloadResult_) { return false }
+  }
+  if p.CanDecompressLZ4Result_ != other.CanDecompressLZ4Result_ {
+    if p.CanDecompressLZ4Result_ == nil || other.CanDecompressLZ4Result_ == nil {
+      return false
+    }
+    if (*p.CanDecompressLZ4Result_) != (*other.CanDecompressLZ4Result_) { return false }
+  }
+  if p.MaxBytesPerFile != other.MaxBytesPerFile {
+    if p.MaxBytesPerFile == nil || other.MaxBytesPerFile == nil {
+      return false
+    }
+    if (*p.MaxBytesPerFile) != (*other.MaxBytesPerFile) { return false }
+  }
+  if !p.UseArrowNativeTypes.Equals(other.UseArrowNativeTypes) { return false }
+  if p.ResultRowLimit != other.ResultRowLimit {
+    if p.ResultRowLimit == nil || other.ResultRowLimit == nil {
+      return false
+    }
+    if (*p.ResultRowLimit) != (*other.ResultRowLimit) { return false }
+  }
+  if len(p.Parameters) != len(other.Parameters) { return false }
+  for i, _tgt := range p.Parameters {
+    _src80 := other.Parameters[i]
+    if !_tgt.Equals(_src80) { return false }
+  }
+  if p.MaxBytesPerBatch != other.MaxBytesPerBatch {
+    if p.MaxBytesPerBatch == nil || other.MaxBytesPerBatch == nil {
+      return false
+    }
+    if (*p.MaxBytesPerBatch) != (*other.MaxBytesPerBatch) { return false }
+  }
+  if !p.StatementConf.Equals(other.StatementConf) { return false }
+  if !p.OperationId.Equals(other.OperationId) { return false }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  if p.RejectHighCostQueries != other.RejectHighCostQueries {
+    if p.RejectHighCostQueries == nil || other.RejectHighCostQueries == nil {
+      return false
+    }
+    if (*p.RejectHighCostQueries) != (*other.RejectHighCostQueries) { return false }
+  }
+  if p.EstimatedCost != other.EstimatedCost {
+    if p.EstimatedCost == nil || other.EstimatedCost == nil {
+      return false
+    }
+    if (*p.EstimatedCost) != (*other.EstimatedCost) { return false }
+  }
+  if p.ExecutionVersion != other.ExecutionVersion {
+    if p.ExecutionVersion == nil || other.ExecutionVersion == nil {
+      return false
+    }
+    if (*p.ExecutionVersion) != (*other.ExecutionVersion) { return false }
+  }
+  if bytes.Compare(p.RequestValidation, other.RequestValidation) != 0 { return false }
+  if p.ResultPersistenceMode != other.ResultPersistenceMode {
+    if p.ResultPersistenceMode == nil || other.ResultPersistenceMode == nil {
+      return false
+    }
+    if (*p.ResultPersistenceMode) != (*other.ResultPersistenceMode) { return false }
+  }
+  if p.TrimArrowBatchesToLimit != other.TrimArrowBatchesToLimit {
+    if p.TrimArrowBatchesToLimit == nil || other.TrimArrowBatchesToLimit == nil {
+      return false
+    }
+    if (*p.TrimArrowBatchesToLimit) != (*other.TrimArrowBatchesToLimit) { return false }
+  }
+  if p.FetchDisposition != other.FetchDisposition {
+    if p.FetchDisposition == nil || other.FetchDisposition == nil {
+      return false
+    }
+    if (*p.FetchDisposition) != (*other.FetchDisposition) { return false }
+  }
+  if p.EnforceResultPersistenceMode != other.EnforceResultPersistenceMode {
+    if p.EnforceResultPersistenceMode == nil || other.EnforceResultPersistenceMode == nil {
+      return false
+    }
+    if (*p.EnforceResultPersistenceMode) != (*other.EnforceResultPersistenceMode) { return false }
+  }
+  if len(p.StatementList) != len(other.StatementList) { return false }
+  for i, _tgt := range p.StatementList {
+    _src81 := other.StatementList[i]
+    if !_tgt.Equals(_src81) { return false }
+  }
+  if p.PersistResultManifest != other.PersistResultManifest {
+    if p.PersistResultManifest == nil || other.PersistResultManifest == nil {
+      return false
+    }
+    if (*p.PersistResultManifest) != (*other.PersistResultManifest) { return false }
+  }
+  if p.ResultRetentionSeconds != other.ResultRetentionSeconds {
+    if p.ResultRetentionSeconds == nil || other.ResultRetentionSeconds == nil {
+      return false
+    }
+    if (*p.ResultRetentionSeconds) != (*other.ResultRetentionSeconds) { return false }
+  }
+  if p.ResultByteLimit != other.ResultByteLimit {
+    if p.ResultByteLimit == nil || other.ResultByteLimit == nil {
+      return false
+    }
+    if (*p.ResultByteLimit) != (*other.ResultByteLimit) { return false }
+  }
+  if !p.ResultDataFormat.Equals(other.ResultDataFormat) { return false }
+  if p.OriginatingClientIdentity != other.OriginatingClientIdentity {
+    if p.OriginatingClientIdentity == nil || other.OriginatingClientIdentity == nil {
+      return false
+    }
+    if (*p.OriginatingClientIdentity) != (*other.OriginatingClientIdentity) { return false }
+  }
+  if p.PreferSingleFileResult_ != other.PreferSingleFileResult_ {
+    if p.PreferSingleFileResult_ == nil || other.PreferSingleFileResult_ == nil {
+      return false
+    }
+    if (*p.PreferSingleFileResult_) != (*other.PreferSingleFileResult_) { return false }
+  }
+  if p.PreferDriverOnlyUpload != other.PreferDriverOnlyUpload {
+    if p.PreferDriverOnlyUpload == nil || other.PreferDriverOnlyUpload == nil {
+      return false
+    }
+    if (*p.PreferDriverOnlyUpload) != (*other.PreferDriverOnlyUpload) { return false }
+  }
+  if p.EnforceEmbeddedSchemaCorrectness != other.EnforceEmbeddedSchemaCorrectness { return false }
+  if p.IdempotencyToken != other.IdempotencyToken {
+    if p.IdempotencyToken == nil || other.IdempotencyToken == nil {
+      return false
+    }
+    if (*p.IdempotencyToken) != (*other.IdempotencyToken) { return false }
+  }
+  if p.ThrowErrorOnByteLimitTruncation != other.ThrowErrorOnByteLimitTruncation {
+    if p.ThrowErrorOnByteLimitTruncation == nil || other.ThrowErrorOnByteLimitTruncation == nil {
+      return false
+    }
+    if (*p.ThrowErrorOnByteLimitTruncation) != (*other.ThrowErrorOnByteLimitTruncation) { return false }
+  }
+  return true
+}
+
+func (p *TExecuteStatementReq) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TExecuteStatementReq(%+v)", *p)
+}
+
+func (p *TExecuteStatementReq) Validate() error {
+  return nil
+}
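+
+// NOTE (editorial): TDBSqlStatement below shows the generated pattern for an
+// optional scalar in miniature: the field is a pointer, IsSetStatement()
+// tests for presence, and GetStatement() falls back to the zero-value
+// default when the field is unset. A hedged usage sketch (the statement
+// text is illustrative only):
+//
+//   s := NewTDBSqlStatement()
+//   text := "SELECT 1"
+//   s.Statement = &text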
+// Attributes:
+//  - Statement
+type TDBSqlStatement struct {
+  Statement *string `thrift:"statement,1" db:"statement" json:"statement,omitempty"`
+}
+
+func NewTDBSqlStatement() *TDBSqlStatement {
+  return &TDBSqlStatement{}
+}
+
+var TDBSqlStatement_Statement_DEFAULT string
+func (p *TDBSqlStatement) GetStatement() string {
+  if !p.IsSetStatement() {
+    return TDBSqlStatement_Statement_DEFAULT
+  }
+return *p.Statement
+}
+func (p *TDBSqlStatement) IsSetStatement() bool {
+  return p.Statement != nil
+}
+
+func (p *TDBSqlStatement) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TDBSqlStatement) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Statement = &v
+}
+  return nil
+}
+
+func (p *TDBSqlStatement) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TDBSqlStatement"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TDBSqlStatement) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStatement() {
+    if err := oprot.WriteFieldBegin(ctx, "statement", thrift.STRING, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:statement: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.Statement)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.statement (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:statement: ", p), err) }
+  }
+  return err
+}
+
+func (p *TDBSqlStatement) Equals(other *TDBSqlStatement) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Statement != other.Statement {
+    if p.Statement == nil || other.Statement == nil {
+      return false
+    }
+    if (*p.Statement) != (*other.Statement) { return false }
+  }
+  return true
+}
+
+func (p *TDBSqlStatement) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TDBSqlStatement(%+v)", *p)
+}
+
+func (p *TDBSqlStatement) Validate() error {
+  return nil
+}
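+
+// NOTE (editorial): TSparkParameterValue below is a Thrift union, so exactly
+// one of stringValue, doubleValue or booleanValue may be set; Write()
+// enforces this via CountSetFieldsTSparkParameterValue() and errors if the
+// count is not 1. An illustrative sketch:
+//
+//   v := NewTSparkParameterValue()
+//   s := "42"
+//   v.StringValue = &s   // also setting DoubleValue would make Write() fail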
+// Attributes:
+//  - StringValue
+//  - DoubleValue
+//  - BooleanValue
+type TSparkParameterValue struct {
+  StringValue *string `thrift:"stringValue,1" db:"stringValue" json:"stringValue,omitempty"`
+  DoubleValue *float64 `thrift:"doubleValue,2" db:"doubleValue" json:"doubleValue,omitempty"`
+  BooleanValue *bool `thrift:"booleanValue,3" db:"booleanValue" json:"booleanValue,omitempty"`
+}
+
+func NewTSparkParameterValue() *TSparkParameterValue {
+  return &TSparkParameterValue{}
+}
+
+var TSparkParameterValue_StringValue_DEFAULT string
+func (p *TSparkParameterValue) GetStringValue() string {
+  if !p.IsSetStringValue() {
+    return TSparkParameterValue_StringValue_DEFAULT
+  }
+return *p.StringValue
+}
+var TSparkParameterValue_DoubleValue_DEFAULT float64
+func (p *TSparkParameterValue) GetDoubleValue() float64 {
+  if !p.IsSetDoubleValue() {
+    return TSparkParameterValue_DoubleValue_DEFAULT
+  }
+return *p.DoubleValue
+}
+var TSparkParameterValue_BooleanValue_DEFAULT bool
+func (p *TSparkParameterValue) GetBooleanValue() bool {
+  if !p.IsSetBooleanValue() {
+    return TSparkParameterValue_BooleanValue_DEFAULT
+  }
+return *p.BooleanValue
+}
+func (p *TSparkParameterValue) CountSetFieldsTSparkParameterValue() int {
+  count := 0
+  if (p.IsSetStringValue()) {
+    count++
+  }
+  if (p.IsSetDoubleValue()) {
+    count++
+  }
+  if (p.IsSetBooleanValue()) {
+    count++
+  }
+  return count
+
+}
+
+func (p *TSparkParameterValue) IsSetStringValue() bool {
+  return p.StringValue != nil
+}
+
+func (p *TSparkParameterValue) IsSetDoubleValue() bool {
+  return p.DoubleValue != nil
+}
+
+func (p *TSparkParameterValue) IsSetBooleanValue() bool {
+  return p.BooleanValue != nil
+}
+
+func (p *TSparkParameterValue) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TSparkParameterValue) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.StringValue = &v
+}
+  return nil
+}
+
+func (p *TSparkParameterValue) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.DoubleValue = &v
+}
+  return nil
+}
+
+func (p *TSparkParameterValue) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.BooleanValue = &v
+}
+  return nil
+}
+
+func (p *TSparkParameterValue) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if c := p.CountSetFieldsTSparkParameterValue(); c != 1 {
+    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
+  }
+  if err := oprot.WriteStructBegin(ctx, "TSparkParameterValue"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TSparkParameterValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStringValue() {
+    if err := oprot.WriteFieldBegin(ctx, "stringValue", thrift.STRING, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:stringValue: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.StringValue)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.stringValue (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:stringValue: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameterValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDoubleValue() {
+    if err := oprot.WriteFieldBegin(ctx, "doubleValue", thrift.DOUBLE, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:doubleValue: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.DoubleValue)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.doubleValue (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:doubleValue: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameterValue) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetBooleanValue() {
+    if err := oprot.WriteFieldBegin(ctx, "booleanValue", thrift.BOOL, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:booleanValue: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.BooleanValue)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.booleanValue (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:booleanValue: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameterValue) Equals(other *TSparkParameterValue) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.StringValue != other.StringValue {
+    if p.StringValue == nil || other.StringValue == nil {
+      return false
+    }
+    if (*p.StringValue) != (*other.StringValue) { return false }
+  }
+  if p.DoubleValue != other.DoubleValue {
+    if p.DoubleValue == nil || other.DoubleValue == nil {
+      return false
+    }
+    if (*p.DoubleValue) != (*other.DoubleValue) { return false }
+  }
+  if p.BooleanValue != other.BooleanValue {
+    if p.BooleanValue == nil || other.BooleanValue == nil {
+      return false
+    }
+    if (*p.BooleanValue) != (*other.BooleanValue) { return false }
+  }
+  return true
+}
+
+func (p *TSparkParameterValue) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TSparkParameterValue(%+v)", *p)
+}
+
+func (p *TSparkParameterValue) Validate() error {
+  return nil
+}
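+
+// NOTE (editorial): TSparkParameter below carries one bound query parameter:
+// either ordinal (for positional `?` markers) or name (for named markers),
+// plus a type string and the value union above. An illustrative positional
+// parameter:
+//
+//   ord, typ, val := int32(1), "STRING", "databricks"
+//   param := &TSparkParameter{Ordinal: &ord, Type: &typ,
+//     Value: &TSparkParameterValue{StringValue: &val}}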
+// Attributes:
+//  - Ordinal
+//  - Name
+//  - Type
+//  - Value
+type TSparkParameter struct {
+  Ordinal *int32 `thrift:"ordinal,1" db:"ordinal" json:"ordinal,omitempty"`
+  Name *string `thrift:"name,2" db:"name" json:"name,omitempty"`
+  Type *string `thrift:"type,3" db:"type" json:"type,omitempty"`
+  Value *TSparkParameterValue `thrift:"value,4" db:"value" json:"value,omitempty"`
+}
+
+func NewTSparkParameter() *TSparkParameter {
+  return &TSparkParameter{}
+}
+
+var TSparkParameter_Ordinal_DEFAULT int32
+func (p *TSparkParameter) GetOrdinal() int32 {
+  if !p.IsSetOrdinal() {
+    return TSparkParameter_Ordinal_DEFAULT
+  }
+return *p.Ordinal
+}
+var TSparkParameter_Name_DEFAULT string
+func (p *TSparkParameter) GetName() string {
+  if !p.IsSetName() {
+    return TSparkParameter_Name_DEFAULT
+  }
+return *p.Name
+}
+var TSparkParameter_Type_DEFAULT string
+func (p *TSparkParameter) GetType() string {
+  if !p.IsSetType() {
+    return TSparkParameter_Type_DEFAULT
+  }
+return *p.Type
+}
+var TSparkParameter_Value_DEFAULT *TSparkParameterValue
+func (p *TSparkParameter) GetValue() *TSparkParameterValue {
+  if !p.IsSetValue() {
+    return TSparkParameter_Value_DEFAULT
+  }
+return p.Value
+}
+func (p *TSparkParameter) IsSetOrdinal() bool {
+  return p.Ordinal != nil
+}
+
+func (p *TSparkParameter) IsSetName() bool {
+  return p.Name != nil
+}
+
+func (p *TSparkParameter) IsSetType() bool {
+  return p.Type != nil
+}
+
+func (p *TSparkParameter) IsSetValue() bool {
+  return p.Value != nil
+}
+
+func (p *TSparkParameter) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TSparkParameter) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ordinal = &v
+}
+  return nil
+}
+
+func (p *TSparkParameter) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Name = &v
+}
+  return nil
+}
+
+func (p *TSparkParameter) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.Type = &v
+}
+  return nil
+}
+
+func (p *TSparkParameter) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Value = &TSparkParameterValue{}
+  if err := p.Value.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err)
+  }
+  return nil
+}
+
+func (p *TSparkParameter) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TSparkParameter"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TSparkParameter) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOrdinal() {
+    if err := oprot.WriteFieldBegin(ctx, "ordinal", thrift.I32, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ordinal: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.Ordinal)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.ordinal (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ordinal: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameter) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetName() {
+    if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:name: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.Name)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.name (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:name: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameter) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetType() {
+    if err := oprot.WriteFieldBegin(ctx, "type", thrift.STRING, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.Type)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameter) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetValue() {
+    if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:value: ", p), err) }
+    if err := p.Value.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:value: ", p), err) }
+  }
+  return err
+}
+
+func (p *TSparkParameter) Equals(other *TSparkParameter) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ordinal != other.Ordinal {
+    if p.Ordinal == nil || other.Ordinal == nil {
+      return false
+    }
+    if (*p.Ordinal) != (*other.Ordinal) { return false }
+  }
+  if p.Name != other.Name {
+    if p.Name == nil || other.Name == nil {
+      return false
+    }
+    if (*p.Name) != (*other.Name) { return false }
+  }
+  if p.Type != other.Type {
+    if p.Type == nil || other.Type == nil {
+      return false
+    }
+    if (*p.Type) != (*other.Type) { return false }
+  }
+  if !p.Value.Equals(other.Value) { return false }
+  return true
+}
+
+func (p *TSparkParameter) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TSparkParameter(%+v)", *p)
+}
+
+func (p *TSparkParameter) Validate() error {
+  return nil
+}
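+
+// NOTE (editorial): TStatementConf below is the per-statement configuration
+// used with sessionless execution: it can pin an initial namespace and
+// report the client protocol version either as the TProtocolVersion enum
+// (client_protocol) or, apparently for values beyond the enum range, as a
+// plain i64 (client_protocol_i64).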
write field begin error 3:type: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) } + } + return err +} + +func (p *TSparkParameter) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetValue() { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:value: ", p), err) } + if err := p.Value.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:value: ", p), err) } + } + return err +} + +func (p *TSparkParameter) Equals(other *TSparkParameter) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ordinal != other.Ordinal { + if p.Ordinal == nil || other.Ordinal == nil { + return false + } + if (*p.Ordinal) != (*other.Ordinal) { return false } + } + if p.Name != other.Name { + if p.Name == nil || other.Name == nil { + return false + } + if (*p.Name) != (*other.Name) { return false } + } + if p.Type != other.Type { + if p.Type == nil || other.Type == nil { + return false + } + if (*p.Type) != (*other.Type) { return false } + } + if !p.Value.Equals(other.Value) { return false } + return true +} + +func (p *TSparkParameter) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TSparkParameter(%+v)", *p) +} + +func (p *TSparkParameter) Validate() error { + return nil +} +// Attributes: +// - Sessionless +// - InitialNamespace +// - ClientProtocol +// - ClientProtocolI64 +type TStatementConf struct { + Sessionless *bool `thrift:"sessionless,1" db:"sessionless" json:"sessionless,omitempty"` + InitialNamespace *TNamespace `thrift:"initialNamespace,2" db:"initialNamespace" json:"initialNamespace,omitempty"` + ClientProtocol *TProtocolVersion `thrift:"client_protocol,3" db:"client_protocol" json:"client_protocol,omitempty"` + ClientProtocolI64 *int64 `thrift:"client_protocol_i64,4" db:"client_protocol_i64" json:"client_protocol_i64,omitempty"` +} + +func NewTStatementConf() *TStatementConf { + return &TStatementConf{} +} + +var TStatementConf_Sessionless_DEFAULT bool +func (p *TStatementConf) GetSessionless() bool { + if !p.IsSetSessionless() { + return TStatementConf_Sessionless_DEFAULT + } +return *p.Sessionless +} +var TStatementConf_InitialNamespace_DEFAULT *TNamespace +func (p *TStatementConf) GetInitialNamespace() *TNamespace { + if !p.IsSetInitialNamespace() { + return TStatementConf_InitialNamespace_DEFAULT + } +return p.InitialNamespace +} +var TStatementConf_ClientProtocol_DEFAULT TProtocolVersion +func (p *TStatementConf) GetClientProtocol() TProtocolVersion { + if !p.IsSetClientProtocol() { + return TStatementConf_ClientProtocol_DEFAULT + } +return *p.ClientProtocol +} +var TStatementConf_ClientProtocolI64_DEFAULT int64 +func (p *TStatementConf) GetClientProtocolI64() int64 { + if !p.IsSetClientProtocolI64() { + return TStatementConf_ClientProtocolI64_DEFAULT + } +return *p.ClientProtocolI64 +} +func (p *TStatementConf) IsSetSessionless() bool { + return p.Sessionless != nil +} + +func (p *TStatementConf) IsSetInitialNamespace() bool { + return p.InitialNamespace 
+// Attributes:
+//  - Sessionless
+//  - InitialNamespace
+//  - ClientProtocol
+//  - ClientProtocolI64
+type TStatementConf struct {
+  Sessionless *bool `thrift:"sessionless,1" db:"sessionless" json:"sessionless,omitempty"`
+  InitialNamespace *TNamespace `thrift:"initialNamespace,2" db:"initialNamespace" json:"initialNamespace,omitempty"`
+  ClientProtocol *TProtocolVersion `thrift:"client_protocol,3" db:"client_protocol" json:"client_protocol,omitempty"`
+  ClientProtocolI64 *int64 `thrift:"client_protocol_i64,4" db:"client_protocol_i64" json:"client_protocol_i64,omitempty"`
+}
+
+func NewTStatementConf() *TStatementConf {
+  return &TStatementConf{}
+}
+
+var TStatementConf_Sessionless_DEFAULT bool
+func (p *TStatementConf) GetSessionless() bool {
+  if !p.IsSetSessionless() {
+    return TStatementConf_Sessionless_DEFAULT
+  }
+  return *p.Sessionless
+}
+var TStatementConf_InitialNamespace_DEFAULT *TNamespace
+func (p *TStatementConf) GetInitialNamespace() *TNamespace {
+  if !p.IsSetInitialNamespace() {
+    return TStatementConf_InitialNamespace_DEFAULT
+  }
+  return p.InitialNamespace
+}
+var TStatementConf_ClientProtocol_DEFAULT TProtocolVersion
+func (p *TStatementConf) GetClientProtocol() TProtocolVersion {
+  if !p.IsSetClientProtocol() {
+    return TStatementConf_ClientProtocol_DEFAULT
+  }
+  return *p.ClientProtocol
+}
+var TStatementConf_ClientProtocolI64_DEFAULT int64
+func (p *TStatementConf) GetClientProtocolI64() int64 {
+  if !p.IsSetClientProtocolI64() {
+    return TStatementConf_ClientProtocolI64_DEFAULT
+  }
+  return *p.ClientProtocolI64
+}
+func (p *TStatementConf) IsSetSessionless() bool {
+  return p.Sessionless != nil
+}
+
+func (p *TStatementConf) IsSetInitialNamespace() bool {
+  return p.InitialNamespace != nil
+}
+
+func (p *TStatementConf) IsSetClientProtocol() bool {
+  return p.ClientProtocol != nil
+}
+
+func (p *TStatementConf) IsSetClientProtocolI64() bool {
+  return p.ClientProtocolI64 != nil
+}
+
+func (p *TStatementConf) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *TStatementConf) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Sessionless = &v
+  }
+  return nil
+}
+
+func (p *TStatementConf) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.InitialNamespace = &TNamespace{}
+  if err := p.InitialNamespace.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.InitialNamespace), err)
+  }
+  return nil
+}
+
+func (p *TStatementConf) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    temp := TProtocolVersion(v)
+    p.ClientProtocol = &temp
+  }
+  return nil
+}
+
+func (p *TStatementConf) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.ClientProtocolI64 = &v
+  }
+  return nil
+}
+
+func (p *TStatementConf) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TStatementConf"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TStatementConf) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionless() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionless", thrift.BOOL, 1); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionless: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.Sessionless)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.sessionless (1) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionless: ", p), err) }
+  }
+  return err
+}
+
+func (p *TStatementConf) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetInitialNamespace() {
+    if err := oprot.WriteFieldBegin(ctx, "initialNamespace", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:initialNamespace: ", p), err) }
+    if err := p.InitialNamespace.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.InitialNamespace), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:initialNamespace: ", p), err) }
+  }
+  return err
+}
+
+func (p *TStatementConf) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetClientProtocol() {
+    if err := oprot.WriteFieldBegin(ctx, "client_protocol", thrift.I32, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:client_protocol: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.ClientProtocol)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.client_protocol (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:client_protocol: ", p), err) }
+  }
+  return err
+}
+
+func (p *TStatementConf) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetClientProtocolI64() {
+    if err := oprot.WriteFieldBegin(ctx, "client_protocol_i64", thrift.I64, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:client_protocol_i64: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ClientProtocolI64)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.client_protocol_i64 (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:client_protocol_i64: ", p), err) }
+  }
+  return err
+}
+
+func (p *TStatementConf) Equals(other *TStatementConf) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Sessionless != other.Sessionless {
+    if p.Sessionless == nil || other.Sessionless == nil {
+      return false
+    }
+    if (*p.Sessionless) != (*other.Sessionless) { return false }
+  }
+  if !p.InitialNamespace.Equals(other.InitialNamespace) { return false }
+  if p.ClientProtocol != other.ClientProtocol {
+    if p.ClientProtocol == nil || other.ClientProtocol == nil {
+      return false
+    }
+    if (*p.ClientProtocol) != (*other.ClientProtocol) { return false }
+  }
+  if p.ClientProtocolI64 != other.ClientProtocolI64 {
+    if p.ClientProtocolI64 == nil || other.ClientProtocolI64 == nil {
+      return false
+    }
+    if (*p.ClientProtocolI64) != (*other.ClientProtocolI64) { return false }
+  }
+  return true
+}
+
+func (p *TStatementConf) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TStatementConf(%+v)", *p)
+}
+
+func (p *TStatementConf) Validate() error {
+  return nil
+}
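One detail worth flagging in the accessor boilerplate: unset optional fields are nil pointers, and the generated getters fall back to a package-level typed default rather than dereferencing nil. A minimal fragment (hypothetical values, not part of the generated file):

    // Hypothetical usage of the generated accessors.
    conf := NewTStatementConf()
    fmt.Println(conf.IsSetSessionless(), conf.GetSessionless()) // false false

    sessionless := true
    conf.Sessionless = &sessionless
    fmt.Println(conf.IsSetSessionless(), conf.GetSessionless()) // true true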
+// Attributes:
+//  - Status
+//  - OperationHandle
+//  - DirectResults
+//  - ExecutionRejected
+//  - MaxClusterCapacity
+//  - QueryCost
+//  - SessionConf
+//  - CurrentClusterLoad
+//  - IdempotencyType
+//  - RemoteResultCacheEnabled
+//  - IsServerless
+//  - OperationHandles
+type TExecuteStatementResp struct {
+  Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+  OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"`
+  // unused fields # 3 to 1280
+  DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"`
+  // unused fields # 1282 to 3328
+  ExecutionRejected *bool `thrift:"executionRejected,3329" db:"executionRejected" json:"executionRejected,omitempty"`
+  MaxClusterCapacity *float64 `thrift:"maxClusterCapacity,3330" db:"maxClusterCapacity" json:"maxClusterCapacity,omitempty"`
+  QueryCost *float64 `thrift:"queryCost,3331" db:"queryCost" json:"queryCost,omitempty"`
+  SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3332" db:"sessionConf" json:"sessionConf,omitempty"`
+  CurrentClusterLoad *float64 `thrift:"currentClusterLoad,3333" db:"currentClusterLoad" json:"currentClusterLoad,omitempty"`
+  IdempotencyType *TOperationIdempotencyType `thrift:"idempotencyType,3334" db:"idempotencyType" json:"idempotencyType,omitempty"`
+  RemoteResultCacheEnabled *bool `thrift:"remoteResultCacheEnabled,3335" db:"remoteResultCacheEnabled" json:"remoteResultCacheEnabled,omitempty"`
+  IsServerless *bool `thrift:"isServerless,3336" db:"isServerless" json:"isServerless,omitempty"`
+  OperationHandles []*TOperationHandle `thrift:"operationHandles,3337" db:"operationHandles" json:"operationHandles,omitempty"`
+}
+
+func NewTExecuteStatementResp() *TExecuteStatementResp {
+  return &TExecuteStatementResp{}
+}
+
+var TExecuteStatementResp_Status_DEFAULT *TStatus
+func (p *TExecuteStatementResp) GetStatus() *TStatus {
+  if !p.IsSetStatus() {
+    return TExecuteStatementResp_Status_DEFAULT
+  }
+  return p.Status
+}
+var TExecuteStatementResp_OperationHandle_DEFAULT *TOperationHandle
+func (p *TExecuteStatementResp) GetOperationHandle() *TOperationHandle {
+  if !p.IsSetOperationHandle() {
+    return TExecuteStatementResp_OperationHandle_DEFAULT
+  }
+  return p.OperationHandle
+}
+var TExecuteStatementResp_DirectResults_DEFAULT *TSparkDirectResults
+func (p *TExecuteStatementResp) GetDirectResults() *TSparkDirectResults {
+  if !p.IsSetDirectResults() {
+    return TExecuteStatementResp_DirectResults_DEFAULT
+  }
+  return p.DirectResults
+}
+var TExecuteStatementResp_ExecutionRejected_DEFAULT bool
+func (p *TExecuteStatementResp) GetExecutionRejected() bool {
+  if !p.IsSetExecutionRejected() {
+    return TExecuteStatementResp_ExecutionRejected_DEFAULT
+  }
+  return *p.ExecutionRejected
+}
+var TExecuteStatementResp_MaxClusterCapacity_DEFAULT float64
+func (p *TExecuteStatementResp) GetMaxClusterCapacity() float64 {
+  if !p.IsSetMaxClusterCapacity() {
+    return TExecuteStatementResp_MaxClusterCapacity_DEFAULT
+  }
+  return *p.MaxClusterCapacity
+}
+var TExecuteStatementResp_QueryCost_DEFAULT float64
+func (p *TExecuteStatementResp) GetQueryCost() float64 {
+  if !p.IsSetQueryCost() {
+    return TExecuteStatementResp_QueryCost_DEFAULT
+  }
+  return *p.QueryCost
+}
+var TExecuteStatementResp_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TExecuteStatementResp) GetSessionConf() *TDBSqlSessionConf {
+  if !p.IsSetSessionConf() {
+    return TExecuteStatementResp_SessionConf_DEFAULT
+  }
+  return p.SessionConf
+}
+var TExecuteStatementResp_CurrentClusterLoad_DEFAULT float64
+func (p *TExecuteStatementResp) GetCurrentClusterLoad() float64 {
+  if !p.IsSetCurrentClusterLoad() {
+    return TExecuteStatementResp_CurrentClusterLoad_DEFAULT
+  }
+  return *p.CurrentClusterLoad
+}
+var TExecuteStatementResp_IdempotencyType_DEFAULT TOperationIdempotencyType
+func (p *TExecuteStatementResp) GetIdempotencyType() TOperationIdempotencyType {
+  if !p.IsSetIdempotencyType() {
+    return TExecuteStatementResp_IdempotencyType_DEFAULT
+  }
+  return *p.IdempotencyType
+}
+var TExecuteStatementResp_RemoteResultCacheEnabled_DEFAULT bool
+func (p *TExecuteStatementResp) GetRemoteResultCacheEnabled() bool {
+  if !p.IsSetRemoteResultCacheEnabled() {
+    return TExecuteStatementResp_RemoteResultCacheEnabled_DEFAULT
+  }
+  return *p.RemoteResultCacheEnabled
+}
+var TExecuteStatementResp_IsServerless_DEFAULT bool
+func (p *TExecuteStatementResp) GetIsServerless() bool {
+  if !p.IsSetIsServerless() {
+    return TExecuteStatementResp_IsServerless_DEFAULT
+  }
+  return *p.IsServerless
+}
+var TExecuteStatementResp_OperationHandles_DEFAULT []*TOperationHandle
+
+func (p *TExecuteStatementResp) GetOperationHandles() []*TOperationHandle {
+  return p.OperationHandles
+}
+func (p *TExecuteStatementResp) IsSetStatus() bool {
+  return p.Status != nil
+}
+
+func (p *TExecuteStatementResp) IsSetOperationHandle() bool {
+  return p.OperationHandle != nil
+}
+
+func (p *TExecuteStatementResp) IsSetDirectResults() bool {
+  return p.DirectResults != nil
+}
+
+func (p *TExecuteStatementResp) IsSetExecutionRejected() bool {
+  return p.ExecutionRejected != nil
+}
+
+func (p *TExecuteStatementResp) IsSetMaxClusterCapacity() bool {
+  return p.MaxClusterCapacity != nil
+}
+
+func (p *TExecuteStatementResp) IsSetQueryCost() bool {
+  return p.QueryCost != nil
+}
+
+func (p *TExecuteStatementResp) IsSetSessionConf() bool {
+  return p.SessionConf != nil
+}
+
+func (p *TExecuteStatementResp) IsSetCurrentClusterLoad() bool {
+  return p.CurrentClusterLoad != nil
+}
+
+func (p *TExecuteStatementResp) IsSetIdempotencyType() bool {
+  return p.IdempotencyType != nil
+}
+
+func (p *TExecuteStatementResp) IsSetRemoteResultCacheEnabled() bool {
+  return p.RemoteResultCacheEnabled != nil
+}
+
+func (p *TExecuteStatementResp) IsSetIsServerless() bool {
+  return p.IsServerless != nil
+}
+
+func (p *TExecuteStatementResp) IsSetOperationHandles() bool {
+  return p.OperationHandles != nil
+}
+
+func (p *TExecuteStatementResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStatus bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStatus = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3330:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField3330(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3331:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField3331(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3332:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3332(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3333:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField3333(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3334:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3334(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3335:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField3335(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3336:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField3336(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3337:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField3337(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStatus{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Status = &TStatus{}
+  if err := p.Status.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationHandle = &TOperationHandle{}
+  if err := p.OperationHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.DirectResults = &TSparkDirectResults{}
+  if err := p.DirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 3329: ", err)
+  } else {
+    p.ExecutionRejected = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 3330: ", err)
+  } else {
+    p.MaxClusterCapacity = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3331(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 3331: ", err)
+  } else {
+    p.QueryCost = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3332(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3333(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 3333: ", err)
+  } else {
+    p.CurrentClusterLoad = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3334(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 3334: ", err)
+  } else {
+    temp := TOperationIdempotencyType(v)
+    p.IdempotencyType = &temp
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3335(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 3335: ", err)
+  } else {
+    p.RemoteResultCacheEnabled = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3336(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 3336: ", err)
+  } else {
+    p.IsServerless = &v
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) ReadField3337(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TOperationHandle, 0, size)
+  p.OperationHandles = tSlice
+  for i := 0; i < size; i ++ {
+    _elem82 := &TOperationHandle{}
+    if err := _elem82.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem82), err)
+    }
+    p.OperationHandles = append(p.OperationHandles, _elem82)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TExecuteStatementResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TExecuteStatementResp"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+    if err := p.writeField3331(ctx, oprot); err != nil { return err }
+    if err := p.writeField3332(ctx, oprot); err != nil { return err }
+    if err := p.writeField3333(ctx, oprot); err != nil { return err }
+    if err := p.writeField3334(ctx, oprot); err != nil { return err }
+    if err := p.writeField3335(ctx, oprot); err != nil { return err }
+    if err := p.writeField3336(ctx, oprot); err != nil { return err }
+    if err := p.writeField3337(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TExecuteStatementResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+  if err := p.Status.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationHandle() {
+    if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) }
+    if err := p.OperationHandle.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) }
+    if err := p.DirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetExecutionRejected() {
+    if err := oprot.WriteFieldBegin(ctx, "executionRejected", thrift.BOOL, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:executionRejected: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.ExecutionRejected)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.executionRejected (3329) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:executionRejected: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetMaxClusterCapacity() {
+    if err := oprot.WriteFieldBegin(ctx, "maxClusterCapacity", thrift.DOUBLE, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:maxClusterCapacity: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.MaxClusterCapacity)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.maxClusterCapacity (3330) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:maxClusterCapacity: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3331(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetQueryCost() {
+    if err := oprot.WriteFieldBegin(ctx, "queryCost", thrift.DOUBLE, 3331); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3331:queryCost: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.QueryCost)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.queryCost (3331) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3331:queryCost: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3332(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3332); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3332:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3332:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3333(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCurrentClusterLoad() {
+    if err := oprot.WriteFieldBegin(ctx, "currentClusterLoad", thrift.DOUBLE, 3333); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3333:currentClusterLoad: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.CurrentClusterLoad)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.currentClusterLoad (3333) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3333:currentClusterLoad: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3334(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIdempotencyType() {
+    if err := oprot.WriteFieldBegin(ctx, "idempotencyType", thrift.I32, 3334); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3334:idempotencyType: ", p), err) }
+    if err := oprot.WriteI32(ctx, int32(*p.IdempotencyType)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.idempotencyType (3334) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3334:idempotencyType: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3335(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRemoteResultCacheEnabled() {
+    if err := oprot.WriteFieldBegin(ctx, "remoteResultCacheEnabled", thrift.BOOL, 3335); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3335:remoteResultCacheEnabled: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.RemoteResultCacheEnabled)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.remoteResultCacheEnabled (3335) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3335:remoteResultCacheEnabled: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3336(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIsServerless() {
+    if err := oprot.WriteFieldBegin(ctx, "isServerless", thrift.BOOL, 3336); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3336:isServerless: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.IsServerless)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.isServerless (3336) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3336:isServerless: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) writeField3337(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationHandles() {
+    if err := oprot.WriteFieldBegin(ctx, "operationHandles", thrift.LIST, 3337); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3337:operationHandles: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.OperationHandles)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.OperationHandles {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3337:operationHandles: ", p), err) }
+  }
+  return err
+}
+
+func (p *TExecuteStatementResp) Equals(other *TExecuteStatementResp) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Status.Equals(other.Status) { return false }
+  if !p.OperationHandle.Equals(other.OperationHandle) { return false }
+  if !p.DirectResults.Equals(other.DirectResults) { return false }
+  if p.ExecutionRejected != other.ExecutionRejected {
+    if p.ExecutionRejected == nil || other.ExecutionRejected == nil {
+      return false
+    }
+    if (*p.ExecutionRejected) != (*other.ExecutionRejected) { return false }
+  }
+  if p.MaxClusterCapacity != other.MaxClusterCapacity {
+    if p.MaxClusterCapacity == nil || other.MaxClusterCapacity == nil {
+      return false
+    }
+    if (*p.MaxClusterCapacity) != (*other.MaxClusterCapacity) { return false }
+  }
+  if p.QueryCost != other.QueryCost {
+    if p.QueryCost == nil || other.QueryCost == nil {
+      return false
+    }
+    if (*p.QueryCost) != (*other.QueryCost) { return false }
+  }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  if p.CurrentClusterLoad != other.CurrentClusterLoad {
+    if p.CurrentClusterLoad == nil || other.CurrentClusterLoad == nil {
+      return false
+    }
+    if (*p.CurrentClusterLoad) != (*other.CurrentClusterLoad) { return false }
+  }
+  if p.IdempotencyType != other.IdempotencyType {
+    if p.IdempotencyType == nil || other.IdempotencyType == nil {
+      return false
+    }
+    if (*p.IdempotencyType) != (*other.IdempotencyType) { return false }
+  }
+  if p.RemoteResultCacheEnabled != other.RemoteResultCacheEnabled {
+    if p.RemoteResultCacheEnabled == nil || other.RemoteResultCacheEnabled == nil {
+      return false
+    }
+    if (*p.RemoteResultCacheEnabled) != (*other.RemoteResultCacheEnabled) { return false }
+  }
+  if p.IsServerless != other.IsServerless {
+    if p.IsServerless == nil || other.IsServerless == nil {
+      return false
+    }
+    if (*p.IsServerless) != (*other.IsServerless) { return false }
+  }
+  if len(p.OperationHandles) != len(other.OperationHandles) { return false }
+  for i, _tgt := range p.OperationHandles {
+    _src83 := other.OperationHandles[i]
+    if !_tgt.Equals(_src83) { return false }
+  }
+  return true
+}
+
+func (p *TExecuteStatementResp) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TExecuteStatementResp(%+v)", *p)
+}
+
+func (p *TExecuteStatementResp) Validate() error {
+  return nil
+}
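Two things stand out in TExecuteStatementResp. First, the `// unused fields` gaps: IDs 3 to 1280 and 1282 to 3328 appear to be left free so the Databricks-specific extensions (3329 and up) do not collide with the upstream Hive TCLIService field numbering, and `Read` silently skips any ID it does not recognize, which keeps old readers compatible with newer servers. Second, `status` is the only required field, and `Read` enforces it after the field loop. A sketch of that enforcement follows; `roundTripExecResp` is a hypothetical helper, and it assumes `TStatus` (defined elsewhere in this file) can be zero-valued:

    // Hypothetical sketch, same package as the generated code.
    func roundTripExecResp(ctx context.Context) error {
        buf := thrift.NewTMemoryBuffer()
        proto := thrift.NewTBinaryProtocolConf(buf, nil)

        in := &TExecuteStatementResp{Status: &TStatus{}} // required field 1 set
        if err := in.Write(ctx, proto); err != nil {
            return err
        }
        out := NewTExecuteStatementResp()
        // Succeeds here; had the stream lacked field 1, Read would return a
        // thrift.INVALID_DATA exception ("Required field Status is not set").
        return out.Read(ctx, proto)
    }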
+// Attributes:
+//  - SessionHandle
+//  - GetDirectResults
+//  - RunAsync
+//  - OperationId
+//  - SessionConf
+type TGetTypeInfoReq struct {
+  SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+  // unused fields # 2 to 1280
+  GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"`
+  RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"`
+  // unused fields # 1283 to 3328
+  OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"`
+  SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"`
+}
+
+func NewTGetTypeInfoReq() *TGetTypeInfoReq {
+  return &TGetTypeInfoReq{}
+}
+
+var TGetTypeInfoReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TGetTypeInfoReq) GetSessionHandle() *TSessionHandle {
+  if !p.IsSetSessionHandle() {
+    return TGetTypeInfoReq_SessionHandle_DEFAULT
+  }
+  return p.SessionHandle
+}
+var TGetTypeInfoReq_GetDirectResults_DEFAULT *TSparkGetDirectResults
+func (p *TGetTypeInfoReq) GetGetDirectResults() *TSparkGetDirectResults {
+  if !p.IsSetGetDirectResults() {
+    return TGetTypeInfoReq_GetDirectResults_DEFAULT
+  }
+  return p.GetDirectResults
+}
+var TGetTypeInfoReq_RunAsync_DEFAULT bool = false
+
+func (p *TGetTypeInfoReq) GetRunAsync() bool {
+  return p.RunAsync
+}
+var TGetTypeInfoReq_OperationId_DEFAULT *THandleIdentifier
+func (p *TGetTypeInfoReq) GetOperationId() *THandleIdentifier {
+  if !p.IsSetOperationId() {
+    return TGetTypeInfoReq_OperationId_DEFAULT
+  }
+  return p.OperationId
+}
+var TGetTypeInfoReq_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TGetTypeInfoReq) GetSessionConf() *TDBSqlSessionConf {
+  if !p.IsSetSessionConf() {
+    return TGetTypeInfoReq_SessionConf_DEFAULT
+  }
+  return p.SessionConf
+}
+func (p *TGetTypeInfoReq) IsSetSessionHandle() bool {
+  return p.SessionHandle != nil
+}
+
+func (p *TGetTypeInfoReq) IsSetGetDirectResults() bool {
+  return p.GetDirectResults != nil
+}
+
+func (p *TGetTypeInfoReq) IsSetRunAsync() bool {
+  return p.RunAsync != TGetTypeInfoReq_RunAsync_DEFAULT
+}
+
+func (p *TGetTypeInfoReq) IsSetOperationId() bool {
+  return p.OperationId != nil
+}
+
+func (p *TGetTypeInfoReq) IsSetSessionConf() bool {
+  return p.SessionConf != nil
+}
+
+func (p *TGetTypeInfoReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSessionHandle bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSessionHandle = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1282:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1282(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3330:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3330(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSessionHandle{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionHandle = &TSessionHandle{}
+  if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.GetDirectResults = &TSparkGetDirectResults{}
+  if err := p.GetDirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1282: ", err)
+  } else {
+    p.RunAsync = v
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationId = &THandleIdentifier{}
+  if err := p.OperationId.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err)
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetTypeInfoReq"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetTypeInfoReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+  if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+  return err
+}
+
+func (p *TGetTypeInfoReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetGetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) }
+    if err := p.GetDirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTypeInfoReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRunAsync() {
+    if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTypeInfoReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationId() {
+    if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) }
+    if err := p.OperationId.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTypeInfoReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTypeInfoReq) Equals(other *TGetTypeInfoReq) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+  if !p.GetDirectResults.Equals(other.GetDirectResults) { return false }
+  if p.RunAsync != other.RunAsync { return false }
+  if !p.OperationId.Equals(other.OperationId) { return false }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  return true
+}
+
+func (p *TGetTypeInfoReq) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetTypeInfoReq(%+v)", *p)
+}
+
+func (p *TGetTypeInfoReq) Validate() error {
+  return nil
+}
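Unlike the pointer-backed optional fields above, `RunAsync` carries a declared default (`false`) and is generated as a plain `bool`, so `IsSetRunAsync` compares against the default instead of checking nil; a `false` value is indistinguishable from "unset" and is simply not serialized. A small hypothetical fragment to illustrate:

    // Hypothetical: default-valued fields report IsSet only when they differ
    // from the declared default.
    req := NewTGetTypeInfoReq()
    req.SessionHandle = &TSessionHandle{} // required; a real handle comes from OpenSession
    fmt.Println(req.IsSetRunAsync())      // false — still equal to the default

    req.RunAsync = true
    fmt.Println(req.IsSetRunAsync())      // true — now written by writeField1282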
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetTypeInfoResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetTypeInfoResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetTypeInfoResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetTypeInfoResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetTypeInfoResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetTypeInfoResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetTypeInfoResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + +func (p *TGetTypeInfoResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), 
err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetTypeInfoResp) Equals(other *TGetTypeInfoResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetTypeInfoResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetTypeInfoResp(%+v)", *p) +} + +func (p *TGetTypeInfoResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - GetDirectResults +// - RunAsync +// - OperationId +// - SessionConf +type TGetCatalogsReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + // unused fields # 2 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetCatalogsReq() *TGetCatalogsReq { + return &TGetCatalogsReq{} +} + +var TGetCatalogsReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetCatalogsReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetCatalogsReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} +var TGetCatalogsReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetCatalogsReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetCatalogsReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetCatalogsReq_RunAsync_DEFAULT bool = false + +func (p *TGetCatalogsReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetCatalogsReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetCatalogsReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TGetCatalogsReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetCatalogsReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetCatalogsReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetCatalogsReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetCatalogsReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetCatalogsReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetCatalogsReq) IsSetRunAsync() bool { + return p.RunAsync != TGetCatalogsReq_RunAsync_DEFAULT +} + +func (p *TGetCatalogsReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TGetCatalogsReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetCatalogsReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if 
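The `directResults` member (field 1281) that recurs in these response structs appears to be the Databricks "direct results" extension: the server can piggyback the first result batch and the operation status on the response itself, sparing the client separate status-poll and fetch calls. A consumer would check for it before polling, along these lines (`firstResults` is a hypothetical helper, not part of the generated file):

    // Hypothetical consumer-side check.
    func firstResults(resp *TGetTypeInfoResp) *TSparkDirectResults {
        if resp.IsSetDirectResults() {
            return resp.GetDirectResults() // inline results, no extra round trip
        }
        return nil // fall back to polling via the operation handle
    }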
fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + return nil +} + +func (p *TGetCatalogsReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TGetCatalogsReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.GetDirectResults = &TSparkGetDirectResults{} + if err := p.GetDirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err) + } + return nil +} + +func (p *TGetCatalogsReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.RunAsync = v +} + return nil +} + +func (p *TGetCatalogsReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TGetCatalogsReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetCatalogsReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetCatalogsReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := 
p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetCatalogsReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetCatalogsReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) } + if err := p.GetDirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) } + } + return err +} + +func (p *TGetCatalogsReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRunAsync() { + if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) } + } + return err +} + +func (p *TGetCatalogsReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationId() { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) } + } + return err +} + +func (p *TGetCatalogsReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", 
p), err) } + } + return err +} + +func (p *TGetCatalogsReq) Equals(other *TGetCatalogsReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if !p.GetDirectResults.Equals(other.GetDirectResults) { return false } + if p.RunAsync != other.RunAsync { return false } + if !p.OperationId.Equals(other.OperationId) { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetCatalogsReq) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetCatalogsReq(%+v)", *p) +} + +func (p *TGetCatalogsReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationHandle +// - DirectResults +type TGetCatalogsResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"` + // unused fields # 3 to 1280 + DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"` +} + +func NewTGetCatalogsResp() *TGetCatalogsResp { + return &TGetCatalogsResp{} +} + +var TGetCatalogsResp_Status_DEFAULT *TStatus +func (p *TGetCatalogsResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetCatalogsResp_Status_DEFAULT + } +return p.Status +} +var TGetCatalogsResp_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetCatalogsResp) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetCatalogsResp_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetCatalogsResp_DirectResults_DEFAULT *TSparkDirectResults +func (p *TGetCatalogsResp) GetDirectResults() *TSparkDirectResults { + if !p.IsSetDirectResults() { + return TGetCatalogsResp_DirectResults_DEFAULT + } +return p.DirectResults +} +func (p *TGetCatalogsResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetCatalogsResp) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetCatalogsResp) IsSetDirectResults() bool { + return p.DirectResults != nil +} + +func (p *TGetCatalogsResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetCatalogsResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetCatalogsResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetCatalogsResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetCatalogsResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetCatalogsResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetCatalogsResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetCatalogsResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + +func (p *TGetCatalogsResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), 
err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetCatalogsResp) Equals(other *TGetCatalogsResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetCatalogsResp) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetCatalogsResp(%+v)", *p) +} + +func (p *TGetCatalogsResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - CatalogName +// - SchemaName +// - GetDirectResults +// - RunAsync +// - OperationId +// - SessionConf +type TGetSchemasReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + CatalogName *TIdentifier `thrift:"catalogName,2" db:"catalogName" json:"catalogName,omitempty"` + SchemaName *TPatternOrIdentifier `thrift:"schemaName,3" db:"schemaName" json:"schemaName,omitempty"` + // unused fields # 4 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetSchemasReq() *TGetSchemasReq { + return &TGetSchemasReq{} +} + +var TGetSchemasReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetSchemasReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetSchemasReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} +var TGetSchemasReq_CatalogName_DEFAULT TIdentifier +func (p *TGetSchemasReq) GetCatalogName() TIdentifier { + if !p.IsSetCatalogName() { + return TGetSchemasReq_CatalogName_DEFAULT + } +return *p.CatalogName +} +var TGetSchemasReq_SchemaName_DEFAULT TPatternOrIdentifier +func (p *TGetSchemasReq) GetSchemaName() TPatternOrIdentifier { + if !p.IsSetSchemaName() { + return TGetSchemasReq_SchemaName_DEFAULT + } +return *p.SchemaName +} +var TGetSchemasReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetSchemasReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetSchemasReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetSchemasReq_RunAsync_DEFAULT bool = false + +func (p *TGetSchemasReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetSchemasReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetSchemasReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TGetSchemasReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetSchemasReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetSchemasReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetSchemasReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetSchemasReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetSchemasReq) IsSetCatalogName() bool { + return p.CatalogName != nil +} + +func (p *TGetSchemasReq) IsSetSchemaName() bool { + return p.SchemaName != nil +} +
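+// Field IDs 1-3 mirror the upstream Hive TCLIService GetSchemas request; the
+// high-numbered fields (getDirectResults 1281, runAsync 1282, operationId 3329,
+// sessionConf 3330) appear to be Spark/Databricks extensions layered onto the
+// base protocol, which is why the generator marks the intervening IDs as unused.
+//
+// A minimal sketch of a hypothetical caller (the names `c`, `sessionHandle`,
+// and the catalog value are illustrative, not part of this file):
+//
+//	req := NewTGetSchemasReq()
+//	req.SessionHandle = sessionHandle  // required, field 1
+//	catalog := TIdentifier("main")     // optional, field 2
+//	req.CatalogName = &catalog
+//	resp, err := c.GetSchemas(ctx, req)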
+func (p *TGetSchemasReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetSchemasReq) IsSetRunAsync() bool { + return p.RunAsync != TGetSchemasReq_RunAsync_DEFAULT +} + +func (p *TGetSchemasReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TGetSchemasReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetSchemasReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + return nil +} + +func (p *TGetSchemasReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TGetSchemasReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TIdentifier(v) + p.CatalogName = &temp +} + return nil +} + +func (p *TGetSchemasReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return 
thrift.PrependError("error reading field 3: ", err) +} else { + temp := TPatternOrIdentifier(v) + p.SchemaName = &temp +} + return nil +} + +func (p *TGetSchemasReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.GetDirectResults = &TSparkGetDirectResults{} + if err := p.GetDirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err) + } + return nil +} + +func (p *TGetSchemasReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.RunAsync = v +} + return nil +} + +func (p *TGetSchemasReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TGetSchemasReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetSchemasReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetSchemasReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetSchemasReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetSchemasReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogName() { + if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:catalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.catalogName (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:catalogName: ", p), err) } + } + return err +} + 
+func (p *TGetSchemasReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.schemaName (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schemaName: ", p), err) } + } + return err +} + +func (p *TGetSchemasReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) } + if err := p.GetDirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) } + } + return err +} + +func (p *TGetSchemasReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRunAsync() { + if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) } + } + return err +} + +func (p *TGetSchemasReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationId() { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) } + } + return err +} + +func (p *TGetSchemasReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) } + } + return err +} + +func (p *TGetSchemasReq) Equals(other *TGetSchemasReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.CatalogName != other.CatalogName { + if p.CatalogName == nil || other.CatalogName == nil { + 
return false + } + if (*p.CatalogName) != (*other.CatalogName) { return false } + } + if p.SchemaName != other.SchemaName { + if p.SchemaName == nil || other.SchemaName == nil { + return false + } + if (*p.SchemaName) != (*other.SchemaName) { return false } + } + if !p.GetDirectResults.Equals(other.GetDirectResults) { return false } + if p.RunAsync != other.RunAsync { return false } + if !p.OperationId.Equals(other.OperationId) { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetSchemasReq) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetSchemasReq(%+v)", *p) +} + +func (p *TGetSchemasReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationHandle +// - DirectResults +type TGetSchemasResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"` + // unused fields # 3 to 1280 + DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"` +} + +func NewTGetSchemasResp() *TGetSchemasResp { + return &TGetSchemasResp{} +} + +var TGetSchemasResp_Status_DEFAULT *TStatus +func (p *TGetSchemasResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetSchemasResp_Status_DEFAULT + } +return p.Status +} +var TGetSchemasResp_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetSchemasResp) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetSchemasResp_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetSchemasResp_DirectResults_DEFAULT *TSparkDirectResults +func (p *TGetSchemasResp) GetDirectResults() *TSparkDirectResults { + if !p.IsSetDirectResults() { + return TGetSchemasResp_DirectResults_DEFAULT + } +return p.DirectResults +} +func (p *TGetSchemasResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetSchemasResp) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetSchemasResp) IsSetDirectResults() bool { + return p.DirectResults != nil +} + +func (p *TGetSchemasResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetSchemasResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetSchemasResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetSchemasResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetSchemasResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetSchemasResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetSchemasResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetSchemasResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + +func (p *TGetSchemasResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err) + } 
+ if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetSchemasResp) Equals(other *TGetSchemasResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetSchemasResp) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetSchemasResp(%+v)", *p) +} + +func (p *TGetSchemasResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - CatalogName +// - SchemaName +// - TableName +// - TableTypes +// - GetDirectResults +// - RunAsync +// - OperationId +// - SessionConf +type TGetTablesReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + CatalogName *TPatternOrIdentifier `thrift:"catalogName,2" db:"catalogName" json:"catalogName,omitempty"` + SchemaName *TPatternOrIdentifier `thrift:"schemaName,3" db:"schemaName" json:"schemaName,omitempty"` + TableName *TPatternOrIdentifier `thrift:"tableName,4" db:"tableName" json:"tableName,omitempty"` + TableTypes []string `thrift:"tableTypes,5" db:"tableTypes" json:"tableTypes,omitempty"` + // unused fields # 6 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetTablesReq() *TGetTablesReq { + return &TGetTablesReq{} +} + +var TGetTablesReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetTablesReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetTablesReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} +var TGetTablesReq_CatalogName_DEFAULT TPatternOrIdentifier +func (p *TGetTablesReq) GetCatalogName() TPatternOrIdentifier { + if !p.IsSetCatalogName() { + return TGetTablesReq_CatalogName_DEFAULT + } +return *p.CatalogName +} +var TGetTablesReq_SchemaName_DEFAULT TPatternOrIdentifier +func (p *TGetTablesReq) GetSchemaName() TPatternOrIdentifier { + if !p.IsSetSchemaName() { + return TGetTablesReq_SchemaName_DEFAULT + } +return *p.SchemaName +} +var TGetTablesReq_TableName_DEFAULT TPatternOrIdentifier +func (p *TGetTablesReq) GetTableName() TPatternOrIdentifier { + if !p.IsSetTableName() { + return TGetTablesReq_TableName_DEFAULT + } +return *p.TableName +} +var TGetTablesReq_TableTypes_DEFAULT []string + +func (p *TGetTablesReq) GetTableTypes() []string { + return p.TableTypes +} +var TGetTablesReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetTablesReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetTablesReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetTablesReq_RunAsync_DEFAULT bool = false + +func (p *TGetTablesReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetTablesReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetTablesReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { +
return TGetTablesReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetTablesReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetTablesReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetTablesReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetTablesReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetTablesReq) IsSetCatalogName() bool { + return p.CatalogName != nil +} + +func (p *TGetTablesReq) IsSetSchemaName() bool { + return p.SchemaName != nil +} + +func (p *TGetTablesReq) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *TGetTablesReq) IsSetTableTypes() bool { + return p.TableTypes != nil +} + +func (p *TGetTablesReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetTablesReq) IsSetRunAsync() bool { + return p.RunAsync != TGetTablesReq_RunAsync_DEFAULT +} + +func (p *TGetTablesReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TGetTablesReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetTablesReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := 
iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + return nil +} + +func (p *TGetTablesReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TGetTablesReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TPatternOrIdentifier(v) + p.CatalogName = &temp +} + return nil +} + +func (p *TGetTablesReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := TPatternOrIdentifier(v) + p.SchemaName = &temp +} + return nil +} + +func (p *TGetTablesReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := TPatternOrIdentifier(v) + p.TableName = &temp +} + return nil +} + +func (p *TGetTablesReq) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.TableTypes = tSlice + for i := 0; i < size; i ++ { +var _elem84 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem84 = v +} + p.TableTypes = append(p.TableTypes, _elem84) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TGetTablesReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.GetDirectResults = &TSparkGetDirectResults{} + if err := p.GetDirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err) + } + return nil +} + +func (p *TGetTablesReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.RunAsync = v +} + return nil +} + +func (p *TGetTablesReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TGetTablesReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetTablesReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetTablesReq"); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetTablesReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetTablesReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogName() { + if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:catalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.catalogName (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:catalogName: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.schemaName (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schemaName: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err := oprot.WriteFieldBegin(ctx, "tableName", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:tableName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.TableName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tableName (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:tableName: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField5(ctx context.Context, oprot thrift.TProtocol) (err 
error) { + if p.IsSetTableTypes() { + if err := oprot.WriteFieldBegin(ctx, "tableTypes", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:tableTypes: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.TableTypes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.TableTypes { + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:tableTypes: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) } + if err := p.GetDirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRunAsync() { + if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationId() { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) } + } + return err +} + +func (p *TGetTablesReq) Equals(other *TGetTablesReq) bool { + if p == other { + return true + } else if p == nil || other == 
nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.CatalogName != other.CatalogName { + if p.CatalogName == nil || other.CatalogName == nil { + return false + } + if (*p.CatalogName) != (*other.CatalogName) { return false } + } + if p.SchemaName != other.SchemaName { + if p.SchemaName == nil || other.SchemaName == nil { + return false + } + if (*p.SchemaName) != (*other.SchemaName) { return false } + } + if p.TableName != other.TableName { + if p.TableName == nil || other.TableName == nil { + return false + } + if (*p.TableName) != (*other.TableName) { return false } + } + if len(p.TableTypes) != len(other.TableTypes) { return false } + for i, _tgt := range p.TableTypes { + _src85 := other.TableTypes[i] + if _tgt != _src85 { return false } + } + if !p.GetDirectResults.Equals(other.GetDirectResults) { return false } + if p.RunAsync != other.RunAsync { return false } + if !p.OperationId.Equals(other.OperationId) { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetTablesReq) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetTablesReq(%+v)", *p) +} + +func (p *TGetTablesReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationHandle +// - DirectResults +type TGetTablesResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"` + // unused fields # 3 to 1280 + DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"` +} + +func NewTGetTablesResp() *TGetTablesResp { + return &TGetTablesResp{} +} + +var TGetTablesResp_Status_DEFAULT *TStatus +func (p *TGetTablesResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetTablesResp_Status_DEFAULT + } +return p.Status +} +var TGetTablesResp_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetTablesResp) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetTablesResp_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetTablesResp_DirectResults_DEFAULT *TSparkDirectResults +func (p *TGetTablesResp) GetDirectResults() *TSparkDirectResults { + if !p.IsSetDirectResults() { + return TGetTablesResp_DirectResults_DEFAULT + } +return p.DirectResults +} +func (p *TGetTablesResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetTablesResp) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetTablesResp) IsSetDirectResults() bool { + return p.DirectResults != nil +} + +func (p *TGetTablesResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err :=
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetTablesResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetTablesResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetTablesResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetTablesResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetTablesResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetTablesResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetTablesResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + 
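+// directResults (field 1281) lets the server piggyback the operation status and
+// an initial result batch onto this response, so the client can typically skip
+// separate GetOperationStatus/FetchResults round trips when the warehouse
+// answers quickly.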
+func (p *TGetTablesResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetTablesResp) Equals(other *TGetTablesResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetTablesResp) String() string { + if p == nil { + return "<nil>" + } + return fmt.Sprintf("TGetTablesResp(%+v)", *p) +} + +func (p *TGetTablesResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - GetDirectResults +// - RunAsync +// - OperationId +// - SessionConf +type TGetTableTypesReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + // unused fields # 2 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetTableTypesReq() *TGetTableTypesReq { + return &TGetTableTypesReq{} +} + +var TGetTableTypesReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetTableTypesReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetTableTypesReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} +var TGetTableTypesReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetTableTypesReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetTableTypesReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetTableTypesReq_RunAsync_DEFAULT bool = false + +func (p *TGetTableTypesReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetTableTypesReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetTableTypesReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TGetTableTypesReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetTableTypesReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetTableTypesReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetTableTypesReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetTableTypesReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetTableTypesReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetTableTypesReq) IsSetRunAsync() bool { + return p.RunAsync != TGetTableTypesReq_RunAsync_DEFAULT +} + +func (p *TGetTableTypesReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p
+// Attributes:
+//  - SessionHandle
+//  - GetDirectResults
+//  - RunAsync
+//  - OperationId
+//  - SessionConf
+type TGetTableTypesReq struct {
+  SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+  // unused fields # 2 to 1280
+  GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"`
+  RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"`
+  // unused fields # 1283 to 3328
+  OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"`
+  SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"`
+}
+
+func NewTGetTableTypesReq() *TGetTableTypesReq {
+  return &TGetTableTypesReq{}
+}
+
+var TGetTableTypesReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TGetTableTypesReq) GetSessionHandle() *TSessionHandle {
+  if !p.IsSetSessionHandle() {
+    return TGetTableTypesReq_SessionHandle_DEFAULT
+  }
+return p.SessionHandle
+}
+var TGetTableTypesReq_GetDirectResults_DEFAULT *TSparkGetDirectResults
+func (p *TGetTableTypesReq) GetGetDirectResults() *TSparkGetDirectResults {
+  if !p.IsSetGetDirectResults() {
+    return TGetTableTypesReq_GetDirectResults_DEFAULT
+  }
+return p.GetDirectResults
+}
+var TGetTableTypesReq_RunAsync_DEFAULT bool = false
+
+func (p *TGetTableTypesReq) GetRunAsync() bool {
+  return p.RunAsync
+}
+var TGetTableTypesReq_OperationId_DEFAULT *THandleIdentifier
+func (p *TGetTableTypesReq) GetOperationId() *THandleIdentifier {
+  if !p.IsSetOperationId() {
+    return TGetTableTypesReq_OperationId_DEFAULT
+  }
+return p.OperationId
+}
+var TGetTableTypesReq_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TGetTableTypesReq) GetSessionConf() *TDBSqlSessionConf {
+  if !p.IsSetSessionConf() {
+    return TGetTableTypesReq_SessionConf_DEFAULT
+  }
+return p.SessionConf
+}
+func (p *TGetTableTypesReq) IsSetSessionHandle() bool {
+  return p.SessionHandle != nil
+}
+
+func (p *TGetTableTypesReq) IsSetGetDirectResults() bool {
+  return p.GetDirectResults != nil
+}
+
+func (p *TGetTableTypesReq) IsSetRunAsync() bool {
+  return p.RunAsync != TGetTableTypesReq_RunAsync_DEFAULT
+}
+
+func (p *TGetTableTypesReq) IsSetOperationId() bool {
+  return p.OperationId != nil
+}
+
+func (p *TGetTableTypesReq) IsSetSessionConf() bool {
+  return p.SessionConf != nil
+}
+
+func (p *TGetTableTypesReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSessionHandle bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSessionHandle = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1282:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1282(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3330:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3330(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSessionHandle{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+  }
+  return nil
+}
+
+func (p *TGetTableTypesReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionHandle = &TSessionHandle{}
+  if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.GetDirectResults = &TSparkGetDirectResults{}
+  if err := p.GetDirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1282: ", err)
+} else {
+  p.RunAsync = v
+}
+  return nil
+}
+
+func (p *TGetTableTypesReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationId = &THandleIdentifier{}
+  if err := p.OperationId.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetTableTypesReq"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetTableTypesReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+  if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+  return err
+}
+
+func (p *TGetTableTypesReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetGetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) }
+    if err := p.GetDirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRunAsync() {
+    if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationId() {
+    if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) }
+    if err := p.OperationId.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesReq) Equals(other *TGetTableTypesReq) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+  if !p.GetDirectResults.Equals(other.GetDirectResults) { return false }
+  if p.RunAsync != other.RunAsync { return false }
+  if !p.OperationId.Equals(other.OperationId) { return false }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  return true
+}
+
+func (p *TGetTableTypesReq) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetTableTypesReq(%+v)", *p)
+}
+
+func (p *TGetTableTypesReq) Validate() error {
+  return nil
+}
+// Attributes:
+//  - Status
+//  - OperationHandle
+//  - DirectResults
+type TGetTableTypesResp struct {
+  Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+  OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"`
+  // unused fields # 3 to 1280
+  DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"`
+}
+
+func NewTGetTableTypesResp() *TGetTableTypesResp {
+  return &TGetTableTypesResp{}
+}
+
+var TGetTableTypesResp_Status_DEFAULT *TStatus
+func (p *TGetTableTypesResp) GetStatus() *TStatus {
+  if !p.IsSetStatus() {
+    return TGetTableTypesResp_Status_DEFAULT
+  }
+return p.Status
+}
+var TGetTableTypesResp_OperationHandle_DEFAULT *TOperationHandle
+func (p *TGetTableTypesResp) GetOperationHandle() *TOperationHandle {
+  if !p.IsSetOperationHandle() {
+    return TGetTableTypesResp_OperationHandle_DEFAULT
+  }
+return p.OperationHandle
+}
+var TGetTableTypesResp_DirectResults_DEFAULT *TSparkDirectResults
+func (p *TGetTableTypesResp) GetDirectResults() *TSparkDirectResults {
+  if !p.IsSetDirectResults() {
+    return TGetTableTypesResp_DirectResults_DEFAULT
+  }
+return p.DirectResults
+}
+func (p *TGetTableTypesResp) IsSetStatus() bool {
+  return p.Status != nil
+}
+
+func (p *TGetTableTypesResp) IsSetOperationHandle() bool {
+  return p.OperationHandle != nil
+}
+
+func (p *TGetTableTypesResp) IsSetDirectResults() bool {
+  return p.DirectResults != nil
+}
+
+func (p *TGetTableTypesResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStatus bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStatus = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStatus{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+  }
+  return nil
+}
+
+func (p *TGetTableTypesResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Status = &TStatus{}
+  if err := p.Status.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationHandle = &TOperationHandle{}
+  if err := p.OperationHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.DirectResults = &TSparkDirectResults{}
+  if err := p.DirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetTableTypesResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetTableTypesResp"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetTableTypesResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+  if err := p.Status.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+  return err
+}
+
+func (p *TGetTableTypesResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationHandle() {
+    if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) }
+    if err := p.OperationHandle.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) }
+    if err := p.DirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetTableTypesResp) Equals(other *TGetTableTypesResp) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Status.Equals(other.Status) { return false }
+  if !p.OperationHandle.Equals(other.OperationHandle) { return false }
+  if !p.DirectResults.Equals(other.DirectResults) { return false }
+  return true
+}
+
+func (p *TGetTableTypesResp) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetTableTypesResp(%+v)", *p)
+}
+
+func (p *TGetTableTypesResp) Validate() error {
+  return nil
+}
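Editor's note: TGetColumnsReq below takes TPatternOrIdentifier values for schema, table and column, which per the HiveServer2 convention this IDL follows accept SQL LIKE-style patterns (% and _). A hedged construction sketch — the string conversions follow from the typedefs used in the struct, while `sess` (an open session handle) is assumed:

    // Sketch only, not part of the patch.
    catalog := TIdentifier("main")
    schema := TPatternOrIdentifier("billing")
    table := TPatternOrIdentifier("usage%") // LIKE-style pattern
    req := NewTGetColumnsReq()
    req.SessionHandle = sess // assumed
    req.CatalogName = &catalog
    req.SchemaName = &schema
    req.TableName = &table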
+// Attributes:
+//  - SessionHandle
+//  - CatalogName
+//  - SchemaName
+//  - TableName
+//  - ColumnName
+//  - GetDirectResults
+//  - RunAsync
+//  - OperationId
+//  - SessionConf
+type TGetColumnsReq struct {
+  SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+  CatalogName *TIdentifier `thrift:"catalogName,2" db:"catalogName" json:"catalogName,omitempty"`
+  SchemaName *TPatternOrIdentifier `thrift:"schemaName,3" db:"schemaName" json:"schemaName,omitempty"`
+  TableName *TPatternOrIdentifier `thrift:"tableName,4" db:"tableName" json:"tableName,omitempty"`
+  ColumnName *TPatternOrIdentifier `thrift:"columnName,5" db:"columnName" json:"columnName,omitempty"`
+  // unused fields # 6 to 1280
+  GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"`
+  RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"`
+  // unused fields # 1283 to 3328
+  OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"`
+  SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"`
+}
+
+func NewTGetColumnsReq() *TGetColumnsReq {
+  return &TGetColumnsReq{}
+}
+
+var TGetColumnsReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TGetColumnsReq) GetSessionHandle() *TSessionHandle {
+  if !p.IsSetSessionHandle() {
+    return TGetColumnsReq_SessionHandle_DEFAULT
+  }
+return p.SessionHandle
+}
+var TGetColumnsReq_CatalogName_DEFAULT TIdentifier
+func (p *TGetColumnsReq) GetCatalogName() TIdentifier {
+  if !p.IsSetCatalogName() {
+    return TGetColumnsReq_CatalogName_DEFAULT
+  }
+return *p.CatalogName
+}
+var TGetColumnsReq_SchemaName_DEFAULT TPatternOrIdentifier
+func (p *TGetColumnsReq) GetSchemaName() TPatternOrIdentifier {
+  if !p.IsSetSchemaName() {
+    return TGetColumnsReq_SchemaName_DEFAULT
+  }
+return *p.SchemaName
+}
+var TGetColumnsReq_TableName_DEFAULT TPatternOrIdentifier
+func (p *TGetColumnsReq) GetTableName() TPatternOrIdentifier {
+  if !p.IsSetTableName() {
+    return TGetColumnsReq_TableName_DEFAULT
+  }
+return *p.TableName
+}
+var TGetColumnsReq_ColumnName_DEFAULT TPatternOrIdentifier
+func (p *TGetColumnsReq) GetColumnName() TPatternOrIdentifier {
+  if !p.IsSetColumnName() {
+    return TGetColumnsReq_ColumnName_DEFAULT
+  }
+return *p.ColumnName
+}
+var TGetColumnsReq_GetDirectResults_DEFAULT *TSparkGetDirectResults
+func (p *TGetColumnsReq) GetGetDirectResults() *TSparkGetDirectResults {
+  if !p.IsSetGetDirectResults() {
+    return TGetColumnsReq_GetDirectResults_DEFAULT
+  }
+return p.GetDirectResults
+}
+var TGetColumnsReq_RunAsync_DEFAULT bool = false
+
+func (p *TGetColumnsReq) GetRunAsync() bool {
+  return p.RunAsync
+}
+var TGetColumnsReq_OperationId_DEFAULT *THandleIdentifier
+func (p *TGetColumnsReq) GetOperationId() *THandleIdentifier {
+  if !p.IsSetOperationId() {
+    return TGetColumnsReq_OperationId_DEFAULT
+  }
+return p.OperationId
+}
+var TGetColumnsReq_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TGetColumnsReq) GetSessionConf() *TDBSqlSessionConf {
+  if !p.IsSetSessionConf() {
+    return TGetColumnsReq_SessionConf_DEFAULT
+  }
+return p.SessionConf
+}
+func (p *TGetColumnsReq) IsSetSessionHandle() bool {
+  return p.SessionHandle != nil
+}
+
+func (p *TGetColumnsReq) IsSetCatalogName() bool {
+  return p.CatalogName != nil
+}
+
+func (p *TGetColumnsReq) IsSetSchemaName() bool {
+  return p.SchemaName != nil
+}
+
+func (p *TGetColumnsReq) IsSetTableName() bool {
+  return p.TableName != nil
+}
+
+func (p *TGetColumnsReq) IsSetColumnName() bool {
+  return p.ColumnName != nil
+}
+
+func (p *TGetColumnsReq) IsSetGetDirectResults() bool {
+  return p.GetDirectResults != nil
+}
+
+func (p *TGetColumnsReq) IsSetRunAsync() bool {
+  return p.RunAsync != TGetColumnsReq_RunAsync_DEFAULT
+}
+
+func (p *TGetColumnsReq) IsSetOperationId() bool {
+  return p.OperationId != nil
+}
+
+func (p *TGetColumnsReq) IsSetSessionConf() bool {
+  return p.SessionConf != nil
+}
+
+func (p *TGetColumnsReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSessionHandle bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSessionHandle = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1282:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1282(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3330:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3330(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSessionHandle{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+  }
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionHandle = &TSessionHandle{}
+  if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  temp := TIdentifier(v)
+  p.CatalogName = &temp
+}
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+} else {
+  temp := TPatternOrIdentifier(v)
+  p.SchemaName = &temp
+}
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+} else {
+  temp := TPatternOrIdentifier(v)
+  p.TableName = &temp
+}
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+} else {
+  temp := TPatternOrIdentifier(v)
+  p.ColumnName = &temp
+}
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.GetDirectResults = &TSparkGetDirectResults{}
+  if err := p.GetDirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1282: ", err)
+} else {
+  p.RunAsync = v
+}
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationId = &THandleIdentifier{}
+  if err := p.OperationId.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetColumnsReq"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetColumnsReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+  if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCatalogName() {
+    if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:catalogName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.catalogName (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:catalogName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSchemaName() {
+    if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schemaName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.schemaName (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schemaName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTableName() {
+    if err := oprot.WriteFieldBegin(ctx, "tableName", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:tableName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.TableName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.tableName (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:tableName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetColumnName() {
+    if err := oprot.WriteFieldBegin(ctx, "columnName", thrift.STRING, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:columnName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.ColumnName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.columnName (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:columnName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetGetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) }
+    if err := p.GetDirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRunAsync() {
+    if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationId() {
+    if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) }
+    if err := p.OperationId.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsReq) Equals(other *TGetColumnsReq) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+  if p.CatalogName != other.CatalogName {
+    if p.CatalogName == nil || other.CatalogName == nil {
+      return false
+    }
+    if (*p.CatalogName) != (*other.CatalogName) { return false }
+  }
+  if p.SchemaName != other.SchemaName {
+    if p.SchemaName == nil || other.SchemaName == nil {
+      return false
+    }
+    if (*p.SchemaName) != (*other.SchemaName) { return false }
+  }
+  if p.TableName != other.TableName {
+    if p.TableName == nil || other.TableName == nil {
+      return false
+    }
+    if (*p.TableName) != (*other.TableName) { return false }
+  }
+  if p.ColumnName != other.ColumnName {
+    if p.ColumnName == nil || other.ColumnName == nil {
+      return false
+    }
+    if (*p.ColumnName) != (*other.ColumnName) { return false }
+  }
+  if !p.GetDirectResults.Equals(other.GetDirectResults) { return false }
+  if p.RunAsync != other.RunAsync { return false }
+  if !p.OperationId.Equals(other.OperationId) { return false }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  return true
+}
+
+func (p *TGetColumnsReq) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetColumnsReq(%+v)", *p)
+}
+
+func (p *TGetColumnsReq) Validate() error {
+  return nil
+}
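Editor's note: the response struct below carries a required TStatus plus optional operation handle and direct results; a caller would typically check the status code before touching the rest. A hedged sketch (the `client.GetColumns` call and the TStatusCode_ERROR_STATUS constant are assumed from elsewhere in this package, not from this hunk):

    // Sketch only, not part of the patch.
    resp, err := client.GetColumns(ctx, req)
    if err != nil {
        return err
    }
    if resp.GetStatus().GetStatusCode() == TStatusCode_ERROR_STATUS {
        return fmt.Errorf("GetColumns failed: %s", resp.GetStatus().GetErrorMessage())
    }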
+// Attributes:
+//  - Status
+//  - OperationHandle
+//  - DirectResults
+type TGetColumnsResp struct {
+  Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+  OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"`
+  // unused fields # 3 to 1280
+  DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"`
+}
+
+func NewTGetColumnsResp() *TGetColumnsResp {
+  return &TGetColumnsResp{}
+}
+
+var TGetColumnsResp_Status_DEFAULT *TStatus
+func (p *TGetColumnsResp) GetStatus() *TStatus {
+  if !p.IsSetStatus() {
+    return TGetColumnsResp_Status_DEFAULT
+  }
+return p.Status
+}
+var TGetColumnsResp_OperationHandle_DEFAULT *TOperationHandle
+func (p *TGetColumnsResp) GetOperationHandle() *TOperationHandle {
+  if !p.IsSetOperationHandle() {
+    return TGetColumnsResp_OperationHandle_DEFAULT
+  }
+return p.OperationHandle
+}
+var TGetColumnsResp_DirectResults_DEFAULT *TSparkDirectResults
+func (p *TGetColumnsResp) GetDirectResults() *TSparkDirectResults {
+  if !p.IsSetDirectResults() {
+    return TGetColumnsResp_DirectResults_DEFAULT
+  }
+return p.DirectResults
+}
+func (p *TGetColumnsResp) IsSetStatus() bool {
+  return p.Status != nil
+}
+
+func (p *TGetColumnsResp) IsSetOperationHandle() bool {
+  return p.OperationHandle != nil
+}
+
+func (p *TGetColumnsResp) IsSetDirectResults() bool {
+  return p.DirectResults != nil
+}
+
+func (p *TGetColumnsResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStatus bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStatus = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStatus{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+  }
+  return nil
+}
+
+func (p *TGetColumnsResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Status = &TStatus{}
+  if err := p.Status.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationHandle = &TOperationHandle{}
+  if err := p.OperationHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.DirectResults = &TSparkDirectResults{}
+  if err := p.DirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetColumnsResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetColumnsResp"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetColumnsResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+  if err := p.Status.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+  return err
+}
+
+func (p *TGetColumnsResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationHandle() {
+    if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) }
+    if err := p.OperationHandle.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) }
+    if err := p.DirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetColumnsResp) Equals(other *TGetColumnsResp) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Status.Equals(other.Status) { return false }
+  if !p.OperationHandle.Equals(other.OperationHandle) { return false }
+  if !p.DirectResults.Equals(other.DirectResults) { return false }
+  return true
+}
+
+func (p *TGetColumnsResp) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetColumnsResp(%+v)", *p)
+}
+
+func (p *TGetColumnsResp) Validate() error {
+  return nil
+}
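Editor's note: unlike the optional pattern fields above, functionName in the next request is a required value-typed field (ID 4), so its writeField4 emits unconditionally and Read rejects a message without it. A hedged construction sketch (`sess` is an assumed open session handle):

    // Sketch only, not part of the patch.
    req := NewTGetFunctionsReq()
    req.SessionHandle = sess                     // assumed
    req.FunctionName = TPatternOrIdentifier("%") // required; match all functions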
+// Attributes:
+//  - SessionHandle
+//  - CatalogName
+//  - SchemaName
+//  - FunctionName
+//  - GetDirectResults
+//  - RunAsync
+//  - OperationId
+//  - SessionConf
+type TGetFunctionsReq struct {
+  SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"`
+  CatalogName *TIdentifier `thrift:"catalogName,2" db:"catalogName" json:"catalogName,omitempty"`
+  SchemaName *TPatternOrIdentifier `thrift:"schemaName,3" db:"schemaName" json:"schemaName,omitempty"`
+  FunctionName TPatternOrIdentifier `thrift:"functionName,4,required" db:"functionName" json:"functionName"`
+  // unused fields # 5 to 1280
+  GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"`
+  RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"`
+  // unused fields # 1283 to 3328
+  OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"`
+  SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"`
+}
+
+func NewTGetFunctionsReq() *TGetFunctionsReq {
+  return &TGetFunctionsReq{}
+}
+
+var TGetFunctionsReq_SessionHandle_DEFAULT *TSessionHandle
+func (p *TGetFunctionsReq) GetSessionHandle() *TSessionHandle {
+  if !p.IsSetSessionHandle() {
+    return TGetFunctionsReq_SessionHandle_DEFAULT
+  }
+return p.SessionHandle
+}
+var TGetFunctionsReq_CatalogName_DEFAULT TIdentifier
+func (p *TGetFunctionsReq) GetCatalogName() TIdentifier {
+  if !p.IsSetCatalogName() {
+    return TGetFunctionsReq_CatalogName_DEFAULT
+  }
+return *p.CatalogName
+}
+var TGetFunctionsReq_SchemaName_DEFAULT TPatternOrIdentifier
+func (p *TGetFunctionsReq) GetSchemaName() TPatternOrIdentifier {
+  if !p.IsSetSchemaName() {
+    return TGetFunctionsReq_SchemaName_DEFAULT
+  }
+return *p.SchemaName
+}
+
+func (p *TGetFunctionsReq) GetFunctionName() TPatternOrIdentifier {
+  return p.FunctionName
+}
+var TGetFunctionsReq_GetDirectResults_DEFAULT *TSparkGetDirectResults
+func (p *TGetFunctionsReq) GetGetDirectResults() *TSparkGetDirectResults {
+  if !p.IsSetGetDirectResults() {
+    return TGetFunctionsReq_GetDirectResults_DEFAULT
+  }
+return p.GetDirectResults
+}
+var TGetFunctionsReq_RunAsync_DEFAULT bool = false
+
+func (p *TGetFunctionsReq) GetRunAsync() bool {
+  return p.RunAsync
+}
+var TGetFunctionsReq_OperationId_DEFAULT *THandleIdentifier
+func (p *TGetFunctionsReq) GetOperationId() *THandleIdentifier {
+  if !p.IsSetOperationId() {
+    return TGetFunctionsReq_OperationId_DEFAULT
+  }
+return p.OperationId
+}
+var TGetFunctionsReq_SessionConf_DEFAULT *TDBSqlSessionConf
+func (p *TGetFunctionsReq) GetSessionConf() *TDBSqlSessionConf {
+  if !p.IsSetSessionConf() {
+    return TGetFunctionsReq_SessionConf_DEFAULT
+  }
+return p.SessionConf
+}
+func (p *TGetFunctionsReq) IsSetSessionHandle() bool {
+  return p.SessionHandle != nil
+}
+
+func (p *TGetFunctionsReq) IsSetCatalogName() bool {
+  return p.CatalogName != nil
+}
+
+func (p *TGetFunctionsReq) IsSetSchemaName() bool {
+  return p.SchemaName != nil
+}
+
+func (p *TGetFunctionsReq) IsSetGetDirectResults() bool {
+  return p.GetDirectResults != nil
+}
+
+func (p *TGetFunctionsReq) IsSetRunAsync() bool {
+  return p.RunAsync != TGetFunctionsReq_RunAsync_DEFAULT
+}
+
+func (p *TGetFunctionsReq) IsSetOperationId() bool {
+  return p.OperationId != nil
+}
+
+func (p *TGetFunctionsReq) IsSetSessionConf() bool {
+  return p.SessionConf != nil
+}
+
+func (p *TGetFunctionsReq) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSessionHandle bool = false;
+  var issetFunctionName bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSessionHandle = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetFunctionName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1282:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1282(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3329:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3329(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3330:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3330(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSessionHandle{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set"));
+  }
+  if !issetFunctionName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FunctionName is not set"));
+  }
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionHandle = &TSessionHandle{}
+  if err := p.SessionHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  temp := TIdentifier(v)
+  p.CatalogName = &temp
+}
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+} else {
+  temp := TPatternOrIdentifier(v)
+  p.SchemaName = &temp
+}
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+} else {
+  temp := TPatternOrIdentifier(v)
+  p.FunctionName = temp
+}
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.GetDirectResults = &TSparkGetDirectResults{}
+  if err := p.GetDirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1282: ", err)
+} else {
+  p.RunAsync = v
+}
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationId = &THandleIdentifier{}
+  if err := p.OperationId.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  p.SessionConf = &TDBSqlSessionConf{}
+  if err := p.SessionConf.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsReq) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetFunctionsReq"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetFunctionsReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) }
+  if err := p.SessionHandle.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCatalogName() {
+    if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:catalogName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.catalogName (2) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:catalogName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSchemaName() {
+    if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schemaName: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.schemaName (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schemaName: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "functionName", thrift.STRING, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:functionName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.FunctionName)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.functionName (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:functionName: ", p), err) }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetGetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) }
+    if err := p.GetDirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRunAsync() {
+    if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationId() {
+    if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) }
+    if err := p.OperationId.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSessionConf() {
+    if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) }
+    if err := p.SessionConf.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsReq) Equals(other *TGetFunctionsReq) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.SessionHandle.Equals(other.SessionHandle) { return false }
+  if p.CatalogName != other.CatalogName {
+    if p.CatalogName == nil || other.CatalogName == nil {
+      return false
+    }
+    if (*p.CatalogName) != (*other.CatalogName) { return false }
+  }
+  if p.SchemaName != other.SchemaName {
+    if p.SchemaName == nil || other.SchemaName == nil {
+      return false
+    }
+    if (*p.SchemaName) != (*other.SchemaName) { return false }
+  }
+  if p.FunctionName != other.FunctionName { return false }
+  if !p.GetDirectResults.Equals(other.GetDirectResults) { return false }
+  if p.RunAsync != other.RunAsync { return false }
+  if !p.OperationId.Equals(other.OperationId) { return false }
+  if !p.SessionConf.Equals(other.SessionConf) { return false }
+  return true
+}
+
+func (p *TGetFunctionsReq) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetFunctionsReq(%+v)", *p)
+}
+
+func (p *TGetFunctionsReq) Validate() error {
+  return nil
+}
+// Attributes:
+//  - Status
+//  - OperationHandle
+//  - DirectResults
+type TGetFunctionsResp struct {
+  Status *TStatus `thrift:"status,1,required" db:"status" json:"status"`
+  OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"`
+  // unused fields # 3 to 1280
+  DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"`
+}
+
+func NewTGetFunctionsResp() *TGetFunctionsResp {
+  return &TGetFunctionsResp{}
+}
+
+var TGetFunctionsResp_Status_DEFAULT *TStatus
+func (p *TGetFunctionsResp) GetStatus() *TStatus {
+  if !p.IsSetStatus() {
+    return TGetFunctionsResp_Status_DEFAULT
+  }
+return p.Status
+}
+var TGetFunctionsResp_OperationHandle_DEFAULT *TOperationHandle
+func (p *TGetFunctionsResp) GetOperationHandle() *TOperationHandle {
+  if !p.IsSetOperationHandle() {
+    return TGetFunctionsResp_OperationHandle_DEFAULT
+  }
+return p.OperationHandle
+}
+var TGetFunctionsResp_DirectResults_DEFAULT *TSparkDirectResults
+func (p *TGetFunctionsResp) GetDirectResults() *TSparkDirectResults {
+  if !p.IsSetDirectResults() {
+    return TGetFunctionsResp_DirectResults_DEFAULT
+  }
+return p.DirectResults
+}
+func (p *TGetFunctionsResp) IsSetStatus() bool {
+  return p.Status != nil
+}
+
+func (p *TGetFunctionsResp) IsSetOperationHandle() bool {
+  return p.OperationHandle != nil
+}
+
+func (p *TGetFunctionsResp) IsSetDirectResults() bool {
+  return p.DirectResults != nil
+}
+
+func (p *TGetFunctionsResp) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStatus bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStatus = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 1281:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1281(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStatus{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set"));
+  }
+  return nil
+}
+
+func (p *TGetFunctionsResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Status = &TStatus{}
+  if err := p.Status.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationHandle = &TOperationHandle{}
+  if err := p.OperationHandle.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  p.DirectResults = &TSparkDirectResults{}
+  if err := p.DirectResults.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err)
+  }
+  return nil
+}
+
+func (p *TGetFunctionsResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetFunctionsResp"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *TGetFunctionsResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) }
+  if err := p.Status.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) }
+  return err
+}
+
+func (p *TGetFunctionsResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationHandle() {
+    if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) }
+    if err := p.OperationHandle.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDirectResults() {
+    if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) }
+    if err := p.DirectResults.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) }
+  }
+  return err
+}
+
+func (p *TGetFunctionsResp) Equals(other *TGetFunctionsResp) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Status.Equals(other.Status) { return false }
+  if !p.OperationHandle.Equals(other.OperationHandle) { return false }
+  if !p.DirectResults.Equals(other.DirectResults) { return false }
+  return true
+}
+
+func (p *TGetFunctionsResp) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("TGetFunctionsResp(%+v)", *p)
+}
+
+func (p *TGetFunctionsResp) Validate() error {
+  return nil
+}
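Editor's note: TGetPrimaryKeysReq below takes plain TIdentifier values (exact catalog/schema/table names, field IDs 2-4) rather than the TPatternOrIdentifier patterns used by GetColumns, which fits a primary-key lookup targeting one specific table. A hedged construction sketch (`sess` is an assumed open session handle):

    // Sketch only, not part of the patch.
    cat, sch, tbl := TIdentifier("main"), TIdentifier("billing"), TIdentifier("usage")
    req := NewTGetPrimaryKeysReq()
    req.SessionHandle = sess // assumed
    req.CatalogName, req.SchemaName, req.TableName = &cat, &sch, &tbl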
*TIdentifier `thrift:"schemaName,3" db:"schemaName" json:"schemaName,omitempty"` + TableName *TIdentifier `thrift:"tableName,4" db:"tableName" json:"tableName,omitempty"` + // unused fields # 5 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetPrimaryKeysReq() *TGetPrimaryKeysReq { + return &TGetPrimaryKeysReq{} +} + +var TGetPrimaryKeysReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetPrimaryKeysReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetPrimaryKeysReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} +var TGetPrimaryKeysReq_CatalogName_DEFAULT TIdentifier +func (p *TGetPrimaryKeysReq) GetCatalogName() TIdentifier { + if !p.IsSetCatalogName() { + return TGetPrimaryKeysReq_CatalogName_DEFAULT + } +return *p.CatalogName +} +var TGetPrimaryKeysReq_SchemaName_DEFAULT TIdentifier +func (p *TGetPrimaryKeysReq) GetSchemaName() TIdentifier { + if !p.IsSetSchemaName() { + return TGetPrimaryKeysReq_SchemaName_DEFAULT + } +return *p.SchemaName +} +var TGetPrimaryKeysReq_TableName_DEFAULT TIdentifier +func (p *TGetPrimaryKeysReq) GetTableName() TIdentifier { + if !p.IsSetTableName() { + return TGetPrimaryKeysReq_TableName_DEFAULT + } +return *p.TableName +} +var TGetPrimaryKeysReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetPrimaryKeysReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetPrimaryKeysReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetPrimaryKeysReq_RunAsync_DEFAULT bool = false + +func (p *TGetPrimaryKeysReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetPrimaryKeysReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetPrimaryKeysReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TGetPrimaryKeysReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetPrimaryKeysReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetPrimaryKeysReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetPrimaryKeysReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetPrimaryKeysReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetPrimaryKeysReq) IsSetCatalogName() bool { + return p.CatalogName != nil +} + +func (p *TGetPrimaryKeysReq) IsSetSchemaName() bool { + return p.SchemaName != nil +} + +func (p *TGetPrimaryKeysReq) IsSetTableName() bool { + return p.TableName != nil +} + +func (p *TGetPrimaryKeysReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetPrimaryKeysReq) IsSetRunAsync() bool { + return p.RunAsync != TGetPrimaryKeysReq_RunAsync_DEFAULT +} + +func (p *TGetPrimaryKeysReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TGetPrimaryKeysReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetPrimaryKeysReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + + for { + _, 
fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TIdentifier(v) + p.CatalogName = &temp +} + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := TIdentifier(v) + p.SchemaName = &temp +} + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := TIdentifier(v) + p.TableName = &temp +} + return nil +} + +func (p *TGetPrimaryKeysReq) 
ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.GetDirectResults = &TSparkGetDirectResults{} + if err := p.GetDirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err) + } + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.RunAsync = v +} + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TGetPrimaryKeysReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetPrimaryKeysReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetPrimaryKeysReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetPrimaryKeysReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetPrimaryKeysReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCatalogName() { + if err := oprot.WriteFieldBegin(ctx, "catalogName", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:catalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.CatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.catalogName (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:catalogName: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField3(ctx context.Context, oprot 
thrift.TProtocol) (err error) { + if p.IsSetSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "schemaName", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:schemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.schemaName (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:schemaName: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTableName() { + if err := oprot.WriteFieldBegin(ctx, "tableName", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:tableName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.TableName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tableName (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:tableName: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) } + if err := p.GetDirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRunAsync() { + if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationId() { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { 
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysReq) Equals(other *TGetPrimaryKeysReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.CatalogName != other.CatalogName { + if p.CatalogName == nil || other.CatalogName == nil { + return false + } + if (*p.CatalogName) != (*other.CatalogName) { return false } + } + if p.SchemaName != other.SchemaName { + if p.SchemaName == nil || other.SchemaName == nil { + return false + } + if (*p.SchemaName) != (*other.SchemaName) { return false } + } + if p.TableName != other.TableName { + if p.TableName == nil || other.TableName == nil { + return false + } + if (*p.TableName) != (*other.TableName) { return false } + } + if !p.GetDirectResults.Equals(other.GetDirectResults) { return false } + if p.RunAsync != other.RunAsync { return false } + if !p.OperationId.Equals(other.OperationId) { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetPrimaryKeysReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetPrimaryKeysReq(%+v)", *p) +} + +func (p *TGetPrimaryKeysReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationHandle +// - DirectResults +type TGetPrimaryKeysResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"` + // unused fields # 3 to 1280 + DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"` +} + +func NewTGetPrimaryKeysResp() *TGetPrimaryKeysResp { + return &TGetPrimaryKeysResp{} +} + +var TGetPrimaryKeysResp_Status_DEFAULT *TStatus +func (p *TGetPrimaryKeysResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetPrimaryKeysResp_Status_DEFAULT + } +return p.Status +} +var TGetPrimaryKeysResp_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetPrimaryKeysResp) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetPrimaryKeysResp_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetPrimaryKeysResp_DirectResults_DEFAULT *TSparkDirectResults +func (p *TGetPrimaryKeysResp) GetDirectResults() *TSparkDirectResults { + if !p.IsSetDirectResults() { + return TGetPrimaryKeysResp_DirectResults_DEFAULT + } +return p.DirectResults +} +func (p *TGetPrimaryKeysResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetPrimaryKeysResp) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetPrimaryKeysResp) IsSetDirectResults() bool { + return p.DirectResults != nil +} + +func (p *TGetPrimaryKeysResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == 
thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetPrimaryKeysResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetPrimaryKeysResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetPrimaryKeysResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetPrimaryKeysResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetPrimaryKeysResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetPrimaryKeysResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetPrimaryKeysResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetPrimaryKeysResp) Equals(other *TGetPrimaryKeysResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetPrimaryKeysResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetPrimaryKeysResp(%+v)", *p) +} + +func (p *TGetPrimaryKeysResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - ParentCatalogName +// - ParentSchemaName +// - ParentTableName +// - ForeignCatalogName +// - ForeignSchemaName +// - ForeignTableName +// - GetDirectResults +// - RunAsync +// - OperationId +// - SessionConf +type TGetCrossReferenceReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + ParentCatalogName *TIdentifier `thrift:"parentCatalogName,2" db:"parentCatalogName" json:"parentCatalogName,omitempty"` + ParentSchemaName *TIdentifier `thrift:"parentSchemaName,3" db:"parentSchemaName" json:"parentSchemaName,omitempty"` + ParentTableName *TIdentifier `thrift:"parentTableName,4" db:"parentTableName" json:"parentTableName,omitempty"` + ForeignCatalogName *TIdentifier `thrift:"foreignCatalogName,5" db:"foreignCatalogName" json:"foreignCatalogName,omitempty"` + ForeignSchemaName *TIdentifier `thrift:"foreignSchemaName,6" db:"foreignSchemaName" json:"foreignSchemaName,omitempty"` + ForeignTableName *TIdentifier `thrift:"foreignTableName,7" db:"foreignTableName" json:"foreignTableName,omitempty"` + // unused fields # 8 to 1280 + GetDirectResults *TSparkGetDirectResults `thrift:"getDirectResults,1281" db:"getDirectResults" json:"getDirectResults,omitempty"` + RunAsync bool `thrift:"runAsync,1282" db:"runAsync" json:"runAsync"` + // unused fields # 1283 to 3328 + OperationId *THandleIdentifier `thrift:"operationId,3329" db:"operationId" json:"operationId,omitempty"` + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3330" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetCrossReferenceReq() *TGetCrossReferenceReq { + return &TGetCrossReferenceReq{} +} + +var TGetCrossReferenceReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetCrossReferenceReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetCrossReferenceReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} 
+var TGetCrossReferenceReq_ParentCatalogName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetParentCatalogName() TIdentifier { + if !p.IsSetParentCatalogName() { + return TGetCrossReferenceReq_ParentCatalogName_DEFAULT + } +return *p.ParentCatalogName +} +var TGetCrossReferenceReq_ParentSchemaName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetParentSchemaName() TIdentifier { + if !p.IsSetParentSchemaName() { + return TGetCrossReferenceReq_ParentSchemaName_DEFAULT + } +return *p.ParentSchemaName +} +var TGetCrossReferenceReq_ParentTableName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetParentTableName() TIdentifier { + if !p.IsSetParentTableName() { + return TGetCrossReferenceReq_ParentTableName_DEFAULT + } +return *p.ParentTableName +} +var TGetCrossReferenceReq_ForeignCatalogName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetForeignCatalogName() TIdentifier { + if !p.IsSetForeignCatalogName() { + return TGetCrossReferenceReq_ForeignCatalogName_DEFAULT + } +return *p.ForeignCatalogName +} +var TGetCrossReferenceReq_ForeignSchemaName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetForeignSchemaName() TIdentifier { + if !p.IsSetForeignSchemaName() { + return TGetCrossReferenceReq_ForeignSchemaName_DEFAULT + } +return *p.ForeignSchemaName +} +var TGetCrossReferenceReq_ForeignTableName_DEFAULT TIdentifier +func (p *TGetCrossReferenceReq) GetForeignTableName() TIdentifier { + if !p.IsSetForeignTableName() { + return TGetCrossReferenceReq_ForeignTableName_DEFAULT + } +return *p.ForeignTableName +} +var TGetCrossReferenceReq_GetDirectResults_DEFAULT *TSparkGetDirectResults +func (p *TGetCrossReferenceReq) GetGetDirectResults() *TSparkGetDirectResults { + if !p.IsSetGetDirectResults() { + return TGetCrossReferenceReq_GetDirectResults_DEFAULT + } +return p.GetDirectResults +} +var TGetCrossReferenceReq_RunAsync_DEFAULT bool = false + +func (p *TGetCrossReferenceReq) GetRunAsync() bool { + return p.RunAsync +} +var TGetCrossReferenceReq_OperationId_DEFAULT *THandleIdentifier +func (p *TGetCrossReferenceReq) GetOperationId() *THandleIdentifier { + if !p.IsSetOperationId() { + return TGetCrossReferenceReq_OperationId_DEFAULT + } +return p.OperationId +} +var TGetCrossReferenceReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetCrossReferenceReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetCrossReferenceReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetCrossReferenceReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetCrossReferenceReq) IsSetParentCatalogName() bool { + return p.ParentCatalogName != nil +} + +func (p *TGetCrossReferenceReq) IsSetParentSchemaName() bool { + return p.ParentSchemaName != nil +} + +func (p *TGetCrossReferenceReq) IsSetParentTableName() bool { + return p.ParentTableName != nil +} + +func (p *TGetCrossReferenceReq) IsSetForeignCatalogName() bool { + return p.ForeignCatalogName != nil +} + +func (p *TGetCrossReferenceReq) IsSetForeignSchemaName() bool { + return p.ForeignSchemaName != nil +} + +func (p *TGetCrossReferenceReq) IsSetForeignTableName() bool { + return p.ForeignTableName != nil +} + +func (p *TGetCrossReferenceReq) IsSetGetDirectResults() bool { + return p.GetDirectResults != nil +} + +func (p *TGetCrossReferenceReq) IsSetRunAsync() bool { + return p.RunAsync != TGetCrossReferenceReq_RunAsync_DEFAULT +} + +func (p *TGetCrossReferenceReq) IsSetOperationId() bool { + return p.OperationId != nil +} + +func (p *TGetCrossReferenceReq) 
IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetCrossReferenceReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + return nil +} + +func (p *TGetCrossReferenceReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", 
p.SessionHandle), err) + } + return nil +} + +func (p *TGetCrossReferenceReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TIdentifier(v) + p.ParentCatalogName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := TIdentifier(v) + p.ParentSchemaName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + temp := TIdentifier(v) + p.ParentTableName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + temp := TIdentifier(v) + p.ForeignCatalogName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + temp := TIdentifier(v) + p.ForeignSchemaName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + temp := TIdentifier(v) + p.ForeignTableName = &temp +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.GetDirectResults = &TSparkGetDirectResults{} + if err := p.GetDirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.GetDirectResults), err) + } + return nil +} + +func (p *TGetCrossReferenceReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.RunAsync = v +} + return nil +} + +func (p *TGetCrossReferenceReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationId = &THandleIdentifier{} + if err := p.OperationId.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationId), err) + } + return nil +} + +func (p *TGetCrossReferenceReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetCrossReferenceReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetCrossReferenceReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + 
if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetCrossReferenceReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetCrossReferenceReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentCatalogName() { + if err := oprot.WriteFieldBegin(ctx, "parentCatalogName", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:parentCatalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ParentCatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentCatalogName (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:parentCatalogName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "parentSchemaName", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:parentSchemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ParentSchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentSchemaName (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:parentSchemaName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentTableName() { + if err := oprot.WriteFieldBegin(ctx, "parentTableName", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentTableName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ParentTableName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentTableName (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentTableName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetForeignCatalogName() { + if err := 
oprot.WriteFieldBegin(ctx, "foreignCatalogName", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:foreignCatalogName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ForeignCatalogName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.foreignCatalogName (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:foreignCatalogName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetForeignSchemaName() { + if err := oprot.WriteFieldBegin(ctx, "foreignSchemaName", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:foreignSchemaName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ForeignSchemaName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.foreignSchemaName (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:foreignSchemaName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetForeignTableName() { + if err := oprot.WriteFieldBegin(ctx, "foreignTableName", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:foreignTableName: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ForeignTableName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.foreignTableName (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:foreignTableName: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "getDirectResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:getDirectResults: ", p), err) } + if err := p.GetDirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.GetDirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:getDirectResults: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRunAsync() { + if err := oprot.WriteFieldBegin(ctx, "runAsync", thrift.BOOL, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:runAsync: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.RunAsync)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.runAsync (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:runAsync: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationId() { + if err := oprot.WriteFieldBegin(ctx, "operationId", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:operationId: ", p), 
err) } + if err := p.OperationId.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationId), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:operationId: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:sessionConf: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceReq) Equals(other *TGetCrossReferenceReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.ParentCatalogName != other.ParentCatalogName { + if p.ParentCatalogName == nil || other.ParentCatalogName == nil { + return false + } + if (*p.ParentCatalogName) != (*other.ParentCatalogName) { return false } + } + if p.ParentSchemaName != other.ParentSchemaName { + if p.ParentSchemaName == nil || other.ParentSchemaName == nil { + return false + } + if (*p.ParentSchemaName) != (*other.ParentSchemaName) { return false } + } + if p.ParentTableName != other.ParentTableName { + if p.ParentTableName == nil || other.ParentTableName == nil { + return false + } + if (*p.ParentTableName) != (*other.ParentTableName) { return false } + } + if p.ForeignCatalogName != other.ForeignCatalogName { + if p.ForeignCatalogName == nil || other.ForeignCatalogName == nil { + return false + } + if (*p.ForeignCatalogName) != (*other.ForeignCatalogName) { return false } + } + if p.ForeignSchemaName != other.ForeignSchemaName { + if p.ForeignSchemaName == nil || other.ForeignSchemaName == nil { + return false + } + if (*p.ForeignSchemaName) != (*other.ForeignSchemaName) { return false } + } + if p.ForeignTableName != other.ForeignTableName { + if p.ForeignTableName == nil || other.ForeignTableName == nil { + return false + } + if (*p.ForeignTableName) != (*other.ForeignTableName) { return false } + } + if !p.GetDirectResults.Equals(other.GetDirectResults) { return false } + if p.RunAsync != other.RunAsync { return false } + if !p.OperationId.Equals(other.OperationId) { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetCrossReferenceReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetCrossReferenceReq(%+v)", *p) +} + +func (p *TGetCrossReferenceReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationHandle +// - DirectResults +type TGetCrossReferenceResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationHandle *TOperationHandle `thrift:"operationHandle,2" db:"operationHandle" json:"operationHandle,omitempty"` + // unused fields # 3 to 1280 + DirectResults *TSparkDirectResults `thrift:"directResults,1281" db:"directResults" json:"directResults,omitempty"` +} + +func NewTGetCrossReferenceResp() *TGetCrossReferenceResp { + return &TGetCrossReferenceResp{} +} + 
+var TGetCrossReferenceResp_Status_DEFAULT *TStatus +func (p *TGetCrossReferenceResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetCrossReferenceResp_Status_DEFAULT + } +return p.Status +} +var TGetCrossReferenceResp_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetCrossReferenceResp) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetCrossReferenceResp_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetCrossReferenceResp_DirectResults_DEFAULT *TSparkDirectResults +func (p *TGetCrossReferenceResp) GetDirectResults() *TSparkDirectResults { + if !p.IsSetDirectResults() { + return TGetCrossReferenceResp_DirectResults_DEFAULT + } +return p.DirectResults +} +func (p *TGetCrossReferenceResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetCrossReferenceResp) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetCrossReferenceResp) IsSetDirectResults() bool { + return p.DirectResults != nil +} + +func (p *TGetCrossReferenceResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetCrossReferenceResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetCrossReferenceResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetCrossReferenceResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.DirectResults = &TSparkDirectResults{} + if err := p.DirectResults.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DirectResults), err) + } + return nil +} + +func (p *TGetCrossReferenceResp) 
Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetCrossReferenceResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetCrossReferenceResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetCrossReferenceResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationHandle() { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationHandle: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDirectResults() { + if err := oprot.WriteFieldBegin(ctx, "directResults", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:directResults: ", p), err) } + if err := p.DirectResults.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DirectResults), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:directResults: ", p), err) } + } + return err +} + +func (p *TGetCrossReferenceResp) Equals(other *TGetCrossReferenceResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if !p.DirectResults.Equals(other.DirectResults) { return false } + return true +} + +func (p *TGetCrossReferenceResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetCrossReferenceResp(%+v)", *p) +} + +func (p *TGetCrossReferenceResp) Validate() error { + return nil +} +// Attributes: +// - OperationHandle +// - GetProgressUpdate +type TGetOperationStatusReq struct { + OperationHandle *TOperationHandle `thrift:"operationHandle,1,required" db:"operationHandle" json:"operationHandle"` + GetProgressUpdate *bool `thrift:"getProgressUpdate,2" db:"getProgressUpdate" json:"getProgressUpdate,omitempty"` +} + +func 
NewTGetOperationStatusReq() *TGetOperationStatusReq { + return &TGetOperationStatusReq{} +} + +var TGetOperationStatusReq_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetOperationStatusReq) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetOperationStatusReq_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetOperationStatusReq_GetProgressUpdate_DEFAULT bool +func (p *TGetOperationStatusReq) GetGetProgressUpdate() bool { + if !p.IsSetGetProgressUpdate() { + return TGetOperationStatusReq_GetProgressUpdate_DEFAULT + } +return *p.GetProgressUpdate +} +func (p *TGetOperationStatusReq) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetOperationStatusReq) IsSetGetProgressUpdate() bool { + return p.GetProgressUpdate != nil +} + +func (p *TGetOperationStatusReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationHandle is not set")); + } + return nil +} + +func (p *TGetOperationStatusReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetOperationStatusReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.GetProgressUpdate = &v +} + return nil +} + +func (p *TGetOperationStatusReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetOperationStatusReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetOperationStatusReq) writeField1(ctx context.Context, oprot 
thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationHandle: ", p), err) } + return err +} + +func (p *TGetOperationStatusReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetGetProgressUpdate() { + if err := oprot.WriteFieldBegin(ctx, "getProgressUpdate", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:getProgressUpdate: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.GetProgressUpdate)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.getProgressUpdate (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:getProgressUpdate: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusReq) Equals(other *TGetOperationStatusReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if p.GetProgressUpdate != other.GetProgressUpdate { + if p.GetProgressUpdate == nil || other.GetProgressUpdate == nil { + return false + } + if (*p.GetProgressUpdate) != (*other.GetProgressUpdate) { return false } + } + return true +} + +func (p *TGetOperationStatusReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetOperationStatusReq(%+v)", *p) +} + +func (p *TGetOperationStatusReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - OperationState +// - SqlState +// - ErrorCode +// - ErrorMessage: The long-form error message. This is deprecated in DBR, +// however servers expecting to serve to Simba drivers should be careful +// to keep returning this as these drivers still depend on it. +// +// Clients should avoid using this field and prefer displayMessage and diagnosticInfo if given. 
+// - TaskStatus +// - OperationStarted +// - OperationCompleted +// - HasResultSet +// - ProgressUpdateResponse +// - NumModifiedRows +// - DisplayMessage +// - DiagnosticInfo +// - ErrorDetailsJson +// - ResponseValidation +// - IdempotencyType +// - StatementTimeout +// - StatementTimeoutLevel +type TGetOperationStatusResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + OperationState *TOperationState `thrift:"operationState,2" db:"operationState" json:"operationState,omitempty"` + SqlState *string `thrift:"sqlState,3" db:"sqlState" json:"sqlState,omitempty"` + ErrorCode *int32 `thrift:"errorCode,4" db:"errorCode" json:"errorCode,omitempty"` + ErrorMessage *string `thrift:"errorMessage,5" db:"errorMessage" json:"errorMessage,omitempty"` + TaskStatus *string `thrift:"taskStatus,6" db:"taskStatus" json:"taskStatus,omitempty"` + OperationStarted *int64 `thrift:"operationStarted,7" db:"operationStarted" json:"operationStarted,omitempty"` + OperationCompleted *int64 `thrift:"operationCompleted,8" db:"operationCompleted" json:"operationCompleted,omitempty"` + HasResultSet *bool `thrift:"hasResultSet,9" db:"hasResultSet" json:"hasResultSet,omitempty"` + ProgressUpdateResponse *TProgressUpdateResp `thrift:"progressUpdateResponse,10" db:"progressUpdateResponse" json:"progressUpdateResponse,omitempty"` + NumModifiedRows *int64 `thrift:"numModifiedRows,11" db:"numModifiedRows" json:"numModifiedRows,omitempty"` + // unused fields # 12 to 1280 + DisplayMessage *string `thrift:"displayMessage,1281" db:"displayMessage" json:"displayMessage,omitempty"` + DiagnosticInfo *string `thrift:"diagnosticInfo,1282" db:"diagnosticInfo" json:"diagnosticInfo,omitempty"` + ErrorDetailsJson *string `thrift:"errorDetailsJson,1283" db:"errorDetailsJson" json:"errorDetailsJson,omitempty"` + // unused fields # 1284 to 3328 + ResponseValidation []byte `thrift:"responseValidation,3329" db:"responseValidation" json:"responseValidation,omitempty"` + IdempotencyType *TOperationIdempotencyType `thrift:"idempotencyType,3330" db:"idempotencyType" json:"idempotencyType,omitempty"` + StatementTimeout *int64 `thrift:"statementTimeout,3331" db:"statementTimeout" json:"statementTimeout,omitempty"` + StatementTimeoutLevel *TOperationTimeoutLevel `thrift:"statementTimeoutLevel,3332" db:"statementTimeoutLevel" json:"statementTimeoutLevel,omitempty"` +} + +func NewTGetOperationStatusResp() *TGetOperationStatusResp { + return &TGetOperationStatusResp{} +} + +var TGetOperationStatusResp_Status_DEFAULT *TStatus +func (p *TGetOperationStatusResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetOperationStatusResp_Status_DEFAULT + } +return p.Status +} +var TGetOperationStatusResp_OperationState_DEFAULT TOperationState +func (p *TGetOperationStatusResp) GetOperationState() TOperationState { + if !p.IsSetOperationState() { + return TGetOperationStatusResp_OperationState_DEFAULT + } +return *p.OperationState +} +var TGetOperationStatusResp_SqlState_DEFAULT string +func (p *TGetOperationStatusResp) GetSqlState() string { + if !p.IsSetSqlState() { + return TGetOperationStatusResp_SqlState_DEFAULT + } +return *p.SqlState +} +var TGetOperationStatusResp_ErrorCode_DEFAULT int32 +func (p *TGetOperationStatusResp) GetErrorCode() int32 { + if !p.IsSetErrorCode() { + return TGetOperationStatusResp_ErrorCode_DEFAULT + } +return *p.ErrorCode +} +var TGetOperationStatusResp_ErrorMessage_DEFAULT string +func (p *TGetOperationStatusResp) GetErrorMessage() string { + if !p.IsSetErrorMessage() { + return 
TGetOperationStatusResp_ErrorMessage_DEFAULT + } +return *p.ErrorMessage +} +var TGetOperationStatusResp_TaskStatus_DEFAULT string +func (p *TGetOperationStatusResp) GetTaskStatus() string { + if !p.IsSetTaskStatus() { + return TGetOperationStatusResp_TaskStatus_DEFAULT + } +return *p.TaskStatus +} +var TGetOperationStatusResp_OperationStarted_DEFAULT int64 +func (p *TGetOperationStatusResp) GetOperationStarted() int64 { + if !p.IsSetOperationStarted() { + return TGetOperationStatusResp_OperationStarted_DEFAULT + } +return *p.OperationStarted +} +var TGetOperationStatusResp_OperationCompleted_DEFAULT int64 +func (p *TGetOperationStatusResp) GetOperationCompleted() int64 { + if !p.IsSetOperationCompleted() { + return TGetOperationStatusResp_OperationCompleted_DEFAULT + } +return *p.OperationCompleted +} +var TGetOperationStatusResp_HasResultSet_DEFAULT bool +func (p *TGetOperationStatusResp) GetHasResultSet() bool { + if !p.IsSetHasResultSet() { + return TGetOperationStatusResp_HasResultSet_DEFAULT + } +return *p.HasResultSet +} +var TGetOperationStatusResp_ProgressUpdateResponse_DEFAULT *TProgressUpdateResp +func (p *TGetOperationStatusResp) GetProgressUpdateResponse() *TProgressUpdateResp { + if !p.IsSetProgressUpdateResponse() { + return TGetOperationStatusResp_ProgressUpdateResponse_DEFAULT + } +return p.ProgressUpdateResponse +} +var TGetOperationStatusResp_NumModifiedRows_DEFAULT int64 +func (p *TGetOperationStatusResp) GetNumModifiedRows() int64 { + if !p.IsSetNumModifiedRows() { + return TGetOperationStatusResp_NumModifiedRows_DEFAULT + } +return *p.NumModifiedRows +} +var TGetOperationStatusResp_DisplayMessage_DEFAULT string +func (p *TGetOperationStatusResp) GetDisplayMessage() string { + if !p.IsSetDisplayMessage() { + return TGetOperationStatusResp_DisplayMessage_DEFAULT + } +return *p.DisplayMessage +} +var TGetOperationStatusResp_DiagnosticInfo_DEFAULT string +func (p *TGetOperationStatusResp) GetDiagnosticInfo() string { + if !p.IsSetDiagnosticInfo() { + return TGetOperationStatusResp_DiagnosticInfo_DEFAULT + } +return *p.DiagnosticInfo +} +var TGetOperationStatusResp_ErrorDetailsJson_DEFAULT string +func (p *TGetOperationStatusResp) GetErrorDetailsJson() string { + if !p.IsSetErrorDetailsJson() { + return TGetOperationStatusResp_ErrorDetailsJson_DEFAULT + } +return *p.ErrorDetailsJson +} +var TGetOperationStatusResp_ResponseValidation_DEFAULT []byte + +func (p *TGetOperationStatusResp) GetResponseValidation() []byte { + return p.ResponseValidation +} +var TGetOperationStatusResp_IdempotencyType_DEFAULT TOperationIdempotencyType +func (p *TGetOperationStatusResp) GetIdempotencyType() TOperationIdempotencyType { + if !p.IsSetIdempotencyType() { + return TGetOperationStatusResp_IdempotencyType_DEFAULT + } +return *p.IdempotencyType +} +var TGetOperationStatusResp_StatementTimeout_DEFAULT int64 +func (p *TGetOperationStatusResp) GetStatementTimeout() int64 { + if !p.IsSetStatementTimeout() { + return TGetOperationStatusResp_StatementTimeout_DEFAULT + } +return *p.StatementTimeout +} +var TGetOperationStatusResp_StatementTimeoutLevel_DEFAULT TOperationTimeoutLevel +func (p *TGetOperationStatusResp) GetStatementTimeoutLevel() TOperationTimeoutLevel { + if !p.IsSetStatementTimeoutLevel() { + return TGetOperationStatusResp_StatementTimeoutLevel_DEFAULT + } +return *p.StatementTimeoutLevel +} +func (p *TGetOperationStatusResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetOperationStatusResp) IsSetOperationState() bool { + return p.OperationState != nil +} + +func 
(p *TGetOperationStatusResp) IsSetSqlState() bool { + return p.SqlState != nil +} + +func (p *TGetOperationStatusResp) IsSetErrorCode() bool { + return p.ErrorCode != nil +} + +func (p *TGetOperationStatusResp) IsSetErrorMessage() bool { + return p.ErrorMessage != nil +} + +func (p *TGetOperationStatusResp) IsSetTaskStatus() bool { + return p.TaskStatus != nil +} + +func (p *TGetOperationStatusResp) IsSetOperationStarted() bool { + return p.OperationStarted != nil +} + +func (p *TGetOperationStatusResp) IsSetOperationCompleted() bool { + return p.OperationCompleted != nil +} + +func (p *TGetOperationStatusResp) IsSetHasResultSet() bool { + return p.HasResultSet != nil +} + +func (p *TGetOperationStatusResp) IsSetProgressUpdateResponse() bool { + return p.ProgressUpdateResponse != nil +} + +func (p *TGetOperationStatusResp) IsSetNumModifiedRows() bool { + return p.NumModifiedRows != nil +} + +func (p *TGetOperationStatusResp) IsSetDisplayMessage() bool { + return p.DisplayMessage != nil +} + +func (p *TGetOperationStatusResp) IsSetDiagnosticInfo() bool { + return p.DiagnosticInfo != nil +} + +func (p *TGetOperationStatusResp) IsSetErrorDetailsJson() bool { + return p.ErrorDetailsJson != nil +} + +func (p *TGetOperationStatusResp) IsSetResponseValidation() bool { + return p.ResponseValidation != nil +} + +func (p *TGetOperationStatusResp) IsSetIdempotencyType() bool { + return p.IdempotencyType != nil +} + +func (p *TGetOperationStatusResp) IsSetStatementTimeout() bool { + return p.StatementTimeout != nil +} + +func (p *TGetOperationStatusResp) IsSetStatementTimeoutLevel() bool { + return p.StatementTimeoutLevel != nil +} + +func (p *TGetOperationStatusResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return 
err + } + } + case 8: + if fieldTypeId == thrift.I64 { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1283: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1283(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3331: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3331(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3332: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3332(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetOperationStatusResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetOperationStatusResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TOperationState(v) + p.OperationState = &temp +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SqlState = &v +} + return nil +} + +func (p 
*TGetOperationStatusResp) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ErrorCode = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.ErrorMessage = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.TaskStatus = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.OperationStarted = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) +} else { + p.OperationCompleted = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.HasResultSet = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + p.ProgressUpdateResponse = &TProgressUpdateResp{} + if err := p.ProgressUpdateResponse.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProgressUpdateResponse), err) + } + return nil +} + +func (p *TGetOperationStatusResp) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) +} else { + p.NumModifiedRows = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1281: ", err) +} else { + p.DisplayMessage = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.DiagnosticInfo = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1283: ", err) +} else { + p.ErrorDetailsJson = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.ResponseValidation = v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3330: ", err) +} else { + temp := TOperationIdempotencyType(v) + p.IdempotencyType 
= &temp +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField3331(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3331: ", err) +} else { + p.StatementTimeout = &v +} + return nil +} + +func (p *TGetOperationStatusResp) ReadField3332(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3332: ", err) +} else { + temp := TOperationTimeoutLevel(v) + p.StatementTimeoutLevel = &temp +} + return nil +} + +func (p *TGetOperationStatusResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetOperationStatusResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + if err := p.writeField11(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField1283(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + if err := p.writeField3331(ctx, oprot); err != nil { return err } + if err := p.writeField3332(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetOperationStatusResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetOperationStatusResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationState() { + if err := oprot.WriteFieldBegin(ctx, "operationState", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:operationState: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.OperationState)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationState (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:operationState: ", 
p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSqlState() { + if err := oprot.WriteFieldBegin(ctx, "sqlState", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:sqlState: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.SqlState)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sqlState (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:sqlState: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorCode() { + if err := oprot.WriteFieldBegin(ctx, "errorCode", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:errorCode: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.ErrorCode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorCode (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:errorCode: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorMessage() { + if err := oprot.WriteFieldBegin(ctx, "errorMessage", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:errorMessage: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ErrorMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorMessage (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:errorMessage: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTaskStatus() { + if err := oprot.WriteFieldBegin(ctx, "taskStatus", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:taskStatus: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.TaskStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.taskStatus (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:taskStatus: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationStarted() { + if err := oprot.WriteFieldBegin(ctx, "operationStarted", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:operationStarted: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.OperationStarted)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationStarted (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:operationStarted: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationCompleted() { + if err := oprot.WriteFieldBegin(ctx, "operationCompleted", thrift.I64, 8); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 8:operationCompleted: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.OperationCompleted)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationCompleted (8) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:operationCompleted: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHasResultSet() { + if err := oprot.WriteFieldBegin(ctx, "hasResultSet", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:hasResultSet: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.HasResultSet)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hasResultSet (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:hasResultSet: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetProgressUpdateResponse() { + if err := oprot.WriteFieldBegin(ctx, "progressUpdateResponse", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:progressUpdateResponse: ", p), err) } + if err := p.ProgressUpdateResponse.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProgressUpdateResponse), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:progressUpdateResponse: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetNumModifiedRows() { + if err := oprot.WriteFieldBegin(ctx, "numModifiedRows", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:numModifiedRows: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.NumModifiedRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.numModifiedRows (11) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:numModifiedRows: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDisplayMessage() { + if err := oprot.WriteFieldBegin(ctx, "displayMessage", thrift.STRING, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:displayMessage: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.DisplayMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.displayMessage (1281) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:displayMessage: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDiagnosticInfo() { + if err := oprot.WriteFieldBegin(ctx, "diagnosticInfo", thrift.STRING, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:diagnosticInfo: ", p), err) } + if err := oprot.WriteString(ctx, 
string(*p.DiagnosticInfo)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.diagnosticInfo (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:diagnosticInfo: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField1283(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetErrorDetailsJson() { + if err := oprot.WriteFieldBegin(ctx, "errorDetailsJson", thrift.STRING, 1283); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1283:errorDetailsJson: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.ErrorDetailsJson)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.errorDetailsJson (1283) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1283:errorDetailsJson: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetResponseValidation() { + if err := oprot.WriteFieldBegin(ctx, "responseValidation", thrift.STRING, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:responseValidation: ", p), err) } + if err := oprot.WriteBinary(ctx, p.ResponseValidation); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.responseValidation (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:responseValidation: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIdempotencyType() { + if err := oprot.WriteFieldBegin(ctx, "idempotencyType", thrift.I32, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:idempotencyType: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.IdempotencyType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.idempotencyType (3330) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:idempotencyType: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField3331(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStatementTimeout() { + if err := oprot.WriteFieldBegin(ctx, "statementTimeout", thrift.I64, 3331); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3331:statementTimeout: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.StatementTimeout)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.statementTimeout (3331) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3331:statementTimeout: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) writeField3332(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStatementTimeoutLevel() { + if err := oprot.WriteFieldBegin(ctx, "statementTimeoutLevel", thrift.I32, 3332); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3332:statementTimeoutLevel: ", p), err) } + if err := oprot.WriteI32(ctx, int32(*p.StatementTimeoutLevel)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.statementTimeoutLevel (3332) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3332:statementTimeoutLevel: ", p), err) } + } + return err +} + +func (p *TGetOperationStatusResp) Equals(other *TGetOperationStatusResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if p.OperationState != other.OperationState { + if p.OperationState == nil || other.OperationState == nil { + return false + } + if (*p.OperationState) != (*other.OperationState) { return false } + } + if p.SqlState != other.SqlState { + if p.SqlState == nil || other.SqlState == nil { + return false + } + if (*p.SqlState) != (*other.SqlState) { return false } + } + if p.ErrorCode != other.ErrorCode { + if p.ErrorCode == nil || other.ErrorCode == nil { + return false + } + if (*p.ErrorCode) != (*other.ErrorCode) { return false } + } + if p.ErrorMessage != other.ErrorMessage { + if p.ErrorMessage == nil || other.ErrorMessage == nil { + return false + } + if (*p.ErrorMessage) != (*other.ErrorMessage) { return false } + } + if p.TaskStatus != other.TaskStatus { + if p.TaskStatus == nil || other.TaskStatus == nil { + return false + } + if (*p.TaskStatus) != (*other.TaskStatus) { return false } + } + if p.OperationStarted != other.OperationStarted { + if p.OperationStarted == nil || other.OperationStarted == nil { + return false + } + if (*p.OperationStarted) != (*other.OperationStarted) { return false } + } + if p.OperationCompleted != other.OperationCompleted { + if p.OperationCompleted == nil || other.OperationCompleted == nil { + return false + } + if (*p.OperationCompleted) != (*other.OperationCompleted) { return false } + } + if p.HasResultSet != other.HasResultSet { + if p.HasResultSet == nil || other.HasResultSet == nil { + return false + } + if (*p.HasResultSet) != (*other.HasResultSet) { return false } + } + if !p.ProgressUpdateResponse.Equals(other.ProgressUpdateResponse) { return false } + if p.NumModifiedRows != other.NumModifiedRows { + if p.NumModifiedRows == nil || other.NumModifiedRows == nil { + return false + } + if (*p.NumModifiedRows) != (*other.NumModifiedRows) { return false } + } + if p.DisplayMessage != other.DisplayMessage { + if p.DisplayMessage == nil || other.DisplayMessage == nil { + return false + } + if (*p.DisplayMessage) != (*other.DisplayMessage) { return false } + } + if p.DiagnosticInfo != other.DiagnosticInfo { + if p.DiagnosticInfo == nil || other.DiagnosticInfo == nil { + return false + } + if (*p.DiagnosticInfo) != (*other.DiagnosticInfo) { return false } + } + if p.ErrorDetailsJson != other.ErrorDetailsJson { + if p.ErrorDetailsJson == nil || other.ErrorDetailsJson == nil { + return false + } + if (*p.ErrorDetailsJson) != (*other.ErrorDetailsJson) { return false } + } + if bytes.Compare(p.ResponseValidation, other.ResponseValidation) != 0 { return false } + if p.IdempotencyType != other.IdempotencyType { + if p.IdempotencyType == nil || other.IdempotencyType == nil { + return false + } + if (*p.IdempotencyType) != (*other.IdempotencyType) { return false } + } + if p.StatementTimeout != other.StatementTimeout { + if p.StatementTimeout == nil || other.StatementTimeout == nil { + return false + } + if (*p.StatementTimeout) != (*other.StatementTimeout) { return false } + } + if p.StatementTimeoutLevel != other.StatementTimeoutLevel { + if p.StatementTimeoutLevel 
== nil || other.StatementTimeoutLevel == nil { + return false + } + if (*p.StatementTimeoutLevel) != (*other.StatementTimeoutLevel) { return false } + } + return true +} + +func (p *TGetOperationStatusResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetOperationStatusResp(%+v)", *p) +} + +func (p *TGetOperationStatusResp) Validate() error { + return nil +} +// Attributes: +// - OperationHandle +// - ExecutionVersion +// - ReplacedByNextAttempt +type TCancelOperationReq struct { + OperationHandle *TOperationHandle `thrift:"operationHandle,1,required" db:"operationHandle" json:"operationHandle"` + // unused fields # 2 to 3328 + ExecutionVersion *int16 `thrift:"executionVersion,3329" db:"executionVersion" json:"executionVersion,omitempty"` + ReplacedByNextAttempt *bool `thrift:"replacedByNextAttempt,3330" db:"replacedByNextAttempt" json:"replacedByNextAttempt,omitempty"` +} + +func NewTCancelOperationReq() *TCancelOperationReq { + return &TCancelOperationReq{} +} + +var TCancelOperationReq_OperationHandle_DEFAULT *TOperationHandle +func (p *TCancelOperationReq) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TCancelOperationReq_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TCancelOperationReq_ExecutionVersion_DEFAULT int16 +func (p *TCancelOperationReq) GetExecutionVersion() int16 { + if !p.IsSetExecutionVersion() { + return TCancelOperationReq_ExecutionVersion_DEFAULT + } +return *p.ExecutionVersion +} +var TCancelOperationReq_ReplacedByNextAttempt_DEFAULT bool +func (p *TCancelOperationReq) GetReplacedByNextAttempt() bool { + if !p.IsSetReplacedByNextAttempt() { + return TCancelOperationReq_ReplacedByNextAttempt_DEFAULT + } +return *p.ReplacedByNextAttempt +} +func (p *TCancelOperationReq) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TCancelOperationReq) IsSetExecutionVersion() bool { + return p.ExecutionVersion != nil +} + +func (p *TCancelOperationReq) IsSetReplacedByNextAttempt() bool { + return p.ReplacedByNextAttempt != nil +} + +func (p *TCancelOperationReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.I16 { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationHandle{ + return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationHandle is not set")); + } + return nil +} + +func (p *TCancelOperationReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TCancelOperationReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.ExecutionVersion = &v +} + return nil +} + +func (p *TCancelOperationReq) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3330: ", err) +} else { + p.ReplacedByNextAttempt = &v +} + return nil +} + +func (p *TCancelOperationReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCancelOperationReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + if err := p.writeField3330(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCancelOperationReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationHandle: ", p), err) } + return err +} + +func (p *TCancelOperationReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetExecutionVersion() { + if err := oprot.WriteFieldBegin(ctx, "executionVersion", thrift.I16, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:executionVersion: ", p), err) } + if err := oprot.WriteI16(ctx, int16(*p.ExecutionVersion)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.executionVersion (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:executionVersion: ", p), err) } + } + return err +} + +func (p *TCancelOperationReq) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetReplacedByNextAttempt() { + if err := oprot.WriteFieldBegin(ctx, "replacedByNextAttempt", thrift.BOOL, 3330); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:replacedByNextAttempt: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.ReplacedByNextAttempt)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.replacedByNextAttempt (3330) field write error: ", 
p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:replacedByNextAttempt: ", p), err) } + } + return err +} + +func (p *TCancelOperationReq) Equals(other *TCancelOperationReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if p.ExecutionVersion != other.ExecutionVersion { + if p.ExecutionVersion == nil || other.ExecutionVersion == nil { + return false + } + if (*p.ExecutionVersion) != (*other.ExecutionVersion) { return false } + } + if p.ReplacedByNextAttempt != other.ReplacedByNextAttempt { + if p.ReplacedByNextAttempt == nil || other.ReplacedByNextAttempt == nil { + return false + } + if (*p.ReplacedByNextAttempt) != (*other.ReplacedByNextAttempt) { return false } + } + return true +} + +func (p *TCancelOperationReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCancelOperationReq(%+v)", *p) +} + +func (p *TCancelOperationReq) Validate() error { + return nil +} +// Attributes: +// - Status +type TCancelOperationResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` +} + +func NewTCancelOperationResp() *TCancelOperationResp { + return &TCancelOperationResp{} +} + +var TCancelOperationResp_Status_DEFAULT *TStatus +func (p *TCancelOperationResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TCancelOperationResp_Status_DEFAULT + } +return p.Status +} +func (p *TCancelOperationResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCancelOperationResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TCancelOperationResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TCancelOperationResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCancelOperationResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := 
oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCancelOperationResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TCancelOperationResp) Equals(other *TCancelOperationResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + return true +} + +func (p *TCancelOperationResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCancelOperationResp(%+v)", *p) +} + +func (p *TCancelOperationResp) Validate() error { + return nil +} +// Attributes: +// - OperationHandle +// - CloseReason +type TCloseOperationReq struct { + OperationHandle *TOperationHandle `thrift:"operationHandle,1,required" db:"operationHandle" json:"operationHandle"` + // unused fields # 2 to 3328 + CloseReason TDBSqlCloseOperationReason `thrift:"closeReason,3329" db:"closeReason" json:"closeReason"` +} + +func NewTCloseOperationReq() *TCloseOperationReq { + return &TCloseOperationReq{ +CloseReason: 0, +} +} + +var TCloseOperationReq_OperationHandle_DEFAULT *TOperationHandle +func (p *TCloseOperationReq) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TCloseOperationReq_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TCloseOperationReq_CloseReason_DEFAULT TDBSqlCloseOperationReason = 0 + +func (p *TCloseOperationReq) GetCloseReason() TDBSqlCloseOperationReason { + return p.CloseReason +} +func (p *TCloseOperationReq) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TCloseOperationReq) IsSetCloseReason() bool { + return p.CloseReason != TCloseOperationReq_CloseReason_DEFAULT +} + +func (p *TCloseOperationReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationHandle{ + return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationHandle is not set")); + } + return nil +} + +func (p *TCloseOperationReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TCloseOperationReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + temp := TDBSqlCloseOperationReason(v) + p.CloseReason = temp +} + return nil +} + +func (p *TCloseOperationReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCloseOperationReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCloseOperationReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationHandle: ", p), err) } + return err +} + +func (p *TCloseOperationReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetCloseReason() { + if err := oprot.WriteFieldBegin(ctx, "closeReason", thrift.I32, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:closeReason: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.CloseReason)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.closeReason (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:closeReason: ", p), err) } + } + return err +} + +func (p *TCloseOperationReq) Equals(other *TCloseOperationReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if p.CloseReason != other.CloseReason { return false } + return true +} + +func (p *TCloseOperationReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCloseOperationReq(%+v)", *p) +} + +func (p *TCloseOperationReq) Validate() error { + return nil +} +// Attributes: +// - Status +type TCloseOperationResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` +} + +func NewTCloseOperationResp() *TCloseOperationResp { + return &TCloseOperationResp{} +} + +var TCloseOperationResp_Status_DEFAULT *TStatus +func (p *TCloseOperationResp) GetStatus() *TStatus { + if 
!p.IsSetStatus() { + return TCloseOperationResp_Status_DEFAULT + } +return p.Status +} +func (p *TCloseOperationResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCloseOperationResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TCloseOperationResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TCloseOperationResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCloseOperationResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCloseOperationResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TCloseOperationResp) Equals(other *TCloseOperationResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + return true +} + +func (p *TCloseOperationResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCloseOperationResp(%+v)", *p) +} + +func (p *TCloseOperationResp) Validate() error { + return nil +} +// Attributes: +// - OperationHandle +// - IncludeCloudResultFiles +type TGetResultSetMetadataReq struct { + OperationHandle *TOperationHandle `thrift:"operationHandle,1,required" db:"operationHandle" json:"operationHandle"` + // unused fields # 2 to 3328 + IncludeCloudResultFiles *bool `thrift:"includeCloudResultFiles,3329" 
db:"includeCloudResultFiles" json:"includeCloudResultFiles,omitempty"` +} + +func NewTGetResultSetMetadataReq() *TGetResultSetMetadataReq { + return &TGetResultSetMetadataReq{} +} + +var TGetResultSetMetadataReq_OperationHandle_DEFAULT *TOperationHandle +func (p *TGetResultSetMetadataReq) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TGetResultSetMetadataReq_OperationHandle_DEFAULT + } +return p.OperationHandle +} +var TGetResultSetMetadataReq_IncludeCloudResultFiles_DEFAULT bool +func (p *TGetResultSetMetadataReq) GetIncludeCloudResultFiles() bool { + if !p.IsSetIncludeCloudResultFiles() { + return TGetResultSetMetadataReq_IncludeCloudResultFiles_DEFAULT + } +return *p.IncludeCloudResultFiles +} +func (p *TGetResultSetMetadataReq) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TGetResultSetMetadataReq) IsSetIncludeCloudResultFiles() bool { + return p.IncludeCloudResultFiles != nil +} + +func (p *TGetResultSetMetadataReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationHandle bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationHandle is not set")); + } + return nil +} + +func (p *TGetResultSetMetadataReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TGetResultSetMetadataReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.IncludeCloudResultFiles = &v +} + return nil +} + +func (p *TGetResultSetMetadataReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetResultSetMetadataReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err 
!= nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetResultSetMetadataReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationHandle: ", p), err) } + return err +} + +func (p *TGetResultSetMetadataReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIncludeCloudResultFiles() { + if err := oprot.WriteFieldBegin(ctx, "includeCloudResultFiles", thrift.BOOL, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:includeCloudResultFiles: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.IncludeCloudResultFiles)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.includeCloudResultFiles (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:includeCloudResultFiles: ", p), err) } + } + return err +} + +func (p *TGetResultSetMetadataReq) Equals(other *TGetResultSetMetadataReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if p.IncludeCloudResultFiles != other.IncludeCloudResultFiles { + if p.IncludeCloudResultFiles == nil || other.IncludeCloudResultFiles == nil { + return false + } + if (*p.IncludeCloudResultFiles) != (*other.IncludeCloudResultFiles) { return false } + } + return true +} + +func (p *TGetResultSetMetadataReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetResultSetMetadataReq(%+v)", *p) +} + +func (p *TGetResultSetMetadataReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - Schema +// - ResultFormat +// - Lz4Compressed +// - ArrowSchema +// - CacheLookupResult_ +// - UncompressedBytes +// - CompressedBytes +// - IsStagingOperation +// - ReasonForNoCloudFetch +// - ResultFiles +// - ManifestFile +// - ManifestFileFormat +// - CacheLookupLatency +// - RemoteCacheMissReason +// - FetchDisposition +// - RemoteResultCacheEnabled +// - IsServerless +// - ResultDataFormat +// - TruncatedByThriftLimit +// - ResultByteLimit +type TGetResultSetMetadataResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + Schema *TTableSchema `thrift:"schema,2" db:"schema" json:"schema,omitempty"` + // unused fields # 3 to 1280 + ResultFormat *TSparkRowSetType `thrift:"resultFormat,1281" db:"resultFormat" json:"resultFormat,omitempty"` + Lz4Compressed *bool `thrift:"lz4Compressed,1282" db:"lz4Compressed" json:"lz4Compressed,omitempty"` + ArrowSchema []byte `thrift:"arrowSchema,1283" db:"arrowSchema" json:"arrowSchema,omitempty"` + CacheLookupResult_ *TCacheLookupResult_ `thrift:"cacheLookupResult,1284" db:"cacheLookupResult" json:"cacheLookupResult,omitempty"` + UncompressedBytes *int64 `thrift:"uncompressedBytes,1285" db:"uncompressedBytes" json:"uncompressedBytes,omitempty"` + CompressedBytes *int64 `thrift:"compressedBytes,1286" db:"compressedBytes" 
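This hunk is Apache Thrift compiler output for the Databricks-extended TCLIService, pulled in through the databricks-sql-go driver. Optional Thrift fields become pointer-typed struct members, each paired with an IsSetX guard and a package-level X_DEFAULT value, which is how callers tell "field absent" apart from a zero value. A minimal sketch of building the request above, assuming an operation handle opHandle obtained from an earlier statement execution (the boolPtr helper is illustrative, not part of the generated API):

    // Sketch only: request result-set metadata and opt into the
    // Databricks extension field 3329 (includeCloudResultFiles).
    func boolPtr(b bool) *bool { return &b }

    req := NewTGetResultSetMetadataReq()
    req.OperationHandle = opHandle              // required, field 1
    req.IncludeCloudResultFiles = boolPtr(true) // optional; leaving it nil omits the field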
json:"compressedBytes,omitempty"` + IsStagingOperation *bool `thrift:"isStagingOperation,1287" db:"isStagingOperation" json:"isStagingOperation,omitempty"` + // unused fields # 1288 to 3328 + ReasonForNoCloudFetch *TCloudFetchDisabledReason `thrift:"reasonForNoCloudFetch,3329" db:"reasonForNoCloudFetch" json:"reasonForNoCloudFetch,omitempty"` + ResultFiles []*TDBSqlCloudResultFile `thrift:"resultFiles,3330" db:"resultFiles" json:"resultFiles,omitempty"` + ManifestFile *string `thrift:"manifestFile,3331" db:"manifestFile" json:"manifestFile,omitempty"` + ManifestFileFormat *TDBSqlManifestFileFormat `thrift:"manifestFileFormat,3332" db:"manifestFileFormat" json:"manifestFileFormat,omitempty"` + CacheLookupLatency *int64 `thrift:"cacheLookupLatency,3333" db:"cacheLookupLatency" json:"cacheLookupLatency,omitempty"` + RemoteCacheMissReason *string `thrift:"remoteCacheMissReason,3334" db:"remoteCacheMissReason" json:"remoteCacheMissReason,omitempty"` + FetchDisposition *TDBSqlFetchDisposition `thrift:"fetchDisposition,3335" db:"fetchDisposition" json:"fetchDisposition,omitempty"` + RemoteResultCacheEnabled *bool `thrift:"remoteResultCacheEnabled,3336" db:"remoteResultCacheEnabled" json:"remoteResultCacheEnabled,omitempty"` + IsServerless *bool `thrift:"isServerless,3337" db:"isServerless" json:"isServerless,omitempty"` + // unused fields # 3338 to 3343 + ResultDataFormat *TDBSqlResultFormat `thrift:"resultDataFormat,3344" db:"resultDataFormat" json:"resultDataFormat,omitempty"` + TruncatedByThriftLimit *bool `thrift:"truncatedByThriftLimit,3345" db:"truncatedByThriftLimit" json:"truncatedByThriftLimit,omitempty"` + ResultByteLimit *int64 `thrift:"resultByteLimit,3346" db:"resultByteLimit" json:"resultByteLimit,omitempty"` +} + +func NewTGetResultSetMetadataResp() *TGetResultSetMetadataResp { + return &TGetResultSetMetadataResp{} +} + +var TGetResultSetMetadataResp_Status_DEFAULT *TStatus +func (p *TGetResultSetMetadataResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetResultSetMetadataResp_Status_DEFAULT + } +return p.Status +} +var TGetResultSetMetadataResp_Schema_DEFAULT *TTableSchema +func (p *TGetResultSetMetadataResp) GetSchema() *TTableSchema { + if !p.IsSetSchema() { + return TGetResultSetMetadataResp_Schema_DEFAULT + } +return p.Schema +} +var TGetResultSetMetadataResp_ResultFormat_DEFAULT TSparkRowSetType +func (p *TGetResultSetMetadataResp) GetResultFormat() TSparkRowSetType { + if !p.IsSetResultFormat() { + return TGetResultSetMetadataResp_ResultFormat_DEFAULT + } +return *p.ResultFormat +} +var TGetResultSetMetadataResp_Lz4Compressed_DEFAULT bool +func (p *TGetResultSetMetadataResp) GetLz4Compressed() bool { + if !p.IsSetLz4Compressed() { + return TGetResultSetMetadataResp_Lz4Compressed_DEFAULT + } +return *p.Lz4Compressed +} +var TGetResultSetMetadataResp_ArrowSchema_DEFAULT []byte + +func (p *TGetResultSetMetadataResp) GetArrowSchema() []byte { + return p.ArrowSchema +} +var TGetResultSetMetadataResp_CacheLookupResult__DEFAULT TCacheLookupResult_ +func (p *TGetResultSetMetadataResp) GetCacheLookupResult_() TCacheLookupResult_ { + if !p.IsSetCacheLookupResult_() { + return TGetResultSetMetadataResp_CacheLookupResult__DEFAULT + } +return *p.CacheLookupResult_ +} +var TGetResultSetMetadataResp_UncompressedBytes_DEFAULT int64 +func (p *TGetResultSetMetadataResp) GetUncompressedBytes() int64 { + if !p.IsSetUncompressedBytes() { + return TGetResultSetMetadataResp_UncompressedBytes_DEFAULT + } +return *p.UncompressedBytes +} +var 
TGetResultSetMetadataResp_CompressedBytes_DEFAULT int64 +func (p *TGetResultSetMetadataResp) GetCompressedBytes() int64 { + if !p.IsSetCompressedBytes() { + return TGetResultSetMetadataResp_CompressedBytes_DEFAULT + } +return *p.CompressedBytes +} +var TGetResultSetMetadataResp_IsStagingOperation_DEFAULT bool +func (p *TGetResultSetMetadataResp) GetIsStagingOperation() bool { + if !p.IsSetIsStagingOperation() { + return TGetResultSetMetadataResp_IsStagingOperation_DEFAULT + } +return *p.IsStagingOperation +} +var TGetResultSetMetadataResp_ReasonForNoCloudFetch_DEFAULT TCloudFetchDisabledReason +func (p *TGetResultSetMetadataResp) GetReasonForNoCloudFetch() TCloudFetchDisabledReason { + if !p.IsSetReasonForNoCloudFetch() { + return TGetResultSetMetadataResp_ReasonForNoCloudFetch_DEFAULT + } +return *p.ReasonForNoCloudFetch +} +var TGetResultSetMetadataResp_ResultFiles_DEFAULT []*TDBSqlCloudResultFile + +func (p *TGetResultSetMetadataResp) GetResultFiles() []*TDBSqlCloudResultFile { + return p.ResultFiles +} +var TGetResultSetMetadataResp_ManifestFile_DEFAULT string +func (p *TGetResultSetMetadataResp) GetManifestFile() string { + if !p.IsSetManifestFile() { + return TGetResultSetMetadataResp_ManifestFile_DEFAULT + } +return *p.ManifestFile +} +var TGetResultSetMetadataResp_ManifestFileFormat_DEFAULT TDBSqlManifestFileFormat +func (p *TGetResultSetMetadataResp) GetManifestFileFormat() TDBSqlManifestFileFormat { + if !p.IsSetManifestFileFormat() { + return TGetResultSetMetadataResp_ManifestFileFormat_DEFAULT + } +return *p.ManifestFileFormat +} +var TGetResultSetMetadataResp_CacheLookupLatency_DEFAULT int64 +func (p *TGetResultSetMetadataResp) GetCacheLookupLatency() int64 { + if !p.IsSetCacheLookupLatency() { + return TGetResultSetMetadataResp_CacheLookupLatency_DEFAULT + } +return *p.CacheLookupLatency +} +var TGetResultSetMetadataResp_RemoteCacheMissReason_DEFAULT string +func (p *TGetResultSetMetadataResp) GetRemoteCacheMissReason() string { + if !p.IsSetRemoteCacheMissReason() { + return TGetResultSetMetadataResp_RemoteCacheMissReason_DEFAULT + } +return *p.RemoteCacheMissReason +} +var TGetResultSetMetadataResp_FetchDisposition_DEFAULT TDBSqlFetchDisposition +func (p *TGetResultSetMetadataResp) GetFetchDisposition() TDBSqlFetchDisposition { + if !p.IsSetFetchDisposition() { + return TGetResultSetMetadataResp_FetchDisposition_DEFAULT + } +return *p.FetchDisposition +} +var TGetResultSetMetadataResp_RemoteResultCacheEnabled_DEFAULT bool +func (p *TGetResultSetMetadataResp) GetRemoteResultCacheEnabled() bool { + if !p.IsSetRemoteResultCacheEnabled() { + return TGetResultSetMetadataResp_RemoteResultCacheEnabled_DEFAULT + } +return *p.RemoteResultCacheEnabled +} +var TGetResultSetMetadataResp_IsServerless_DEFAULT bool +func (p *TGetResultSetMetadataResp) GetIsServerless() bool { + if !p.IsSetIsServerless() { + return TGetResultSetMetadataResp_IsServerless_DEFAULT + } +return *p.IsServerless +} +var TGetResultSetMetadataResp_ResultDataFormat_DEFAULT *TDBSqlResultFormat +func (p *TGetResultSetMetadataResp) GetResultDataFormat() *TDBSqlResultFormat { + if !p.IsSetResultDataFormat() { + return TGetResultSetMetadataResp_ResultDataFormat_DEFAULT + } +return p.ResultDataFormat +} +var TGetResultSetMetadataResp_TruncatedByThriftLimit_DEFAULT bool +func (p *TGetResultSetMetadataResp) GetTruncatedByThriftLimit() bool { + if !p.IsSetTruncatedByThriftLimit() { + return TGetResultSetMetadataResp_TruncatedByThriftLimit_DEFAULT + } +return *p.TruncatedByThriftLimit +} +var 
TGetResultSetMetadataResp_ResultByteLimit_DEFAULT int64 +func (p *TGetResultSetMetadataResp) GetResultByteLimit() int64 { + if !p.IsSetResultByteLimit() { + return TGetResultSetMetadataResp_ResultByteLimit_DEFAULT + } +return *p.ResultByteLimit +} +func (p *TGetResultSetMetadataResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetResultSetMetadataResp) IsSetSchema() bool { + return p.Schema != nil +} + +func (p *TGetResultSetMetadataResp) IsSetResultFormat() bool { + return p.ResultFormat != nil +} + +func (p *TGetResultSetMetadataResp) IsSetLz4Compressed() bool { + return p.Lz4Compressed != nil +} + +func (p *TGetResultSetMetadataResp) IsSetArrowSchema() bool { + return p.ArrowSchema != nil +} + +func (p *TGetResultSetMetadataResp) IsSetCacheLookupResult_() bool { + return p.CacheLookupResult_ != nil +} + +func (p *TGetResultSetMetadataResp) IsSetUncompressedBytes() bool { + return p.UncompressedBytes != nil +} + +func (p *TGetResultSetMetadataResp) IsSetCompressedBytes() bool { + return p.CompressedBytes != nil +} + +func (p *TGetResultSetMetadataResp) IsSetIsStagingOperation() bool { + return p.IsStagingOperation != nil +} + +func (p *TGetResultSetMetadataResp) IsSetReasonForNoCloudFetch() bool { + return p.ReasonForNoCloudFetch != nil +} + +func (p *TGetResultSetMetadataResp) IsSetResultFiles() bool { + return p.ResultFiles != nil +} + +func (p *TGetResultSetMetadataResp) IsSetManifestFile() bool { + return p.ManifestFile != nil +} + +func (p *TGetResultSetMetadataResp) IsSetManifestFileFormat() bool { + return p.ManifestFileFormat != nil +} + +func (p *TGetResultSetMetadataResp) IsSetCacheLookupLatency() bool { + return p.CacheLookupLatency != nil +} + +func (p *TGetResultSetMetadataResp) IsSetRemoteCacheMissReason() bool { + return p.RemoteCacheMissReason != nil +} + +func (p *TGetResultSetMetadataResp) IsSetFetchDisposition() bool { + return p.FetchDisposition != nil +} + +func (p *TGetResultSetMetadataResp) IsSetRemoteResultCacheEnabled() bool { + return p.RemoteResultCacheEnabled != nil +} + +func (p *TGetResultSetMetadataResp) IsSetIsServerless() bool { + return p.IsServerless != nil +} + +func (p *TGetResultSetMetadataResp) IsSetResultDataFormat() bool { + return p.ResultDataFormat != nil +} + +func (p *TGetResultSetMetadataResp) IsSetTruncatedByThriftLimit() bool { + return p.TruncatedByThriftLimit != nil +} + +func (p *TGetResultSetMetadataResp) IsSetResultByteLimit() bool { + return p.ResultByteLimit != nil +} + +func (p *TGetResultSetMetadataResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1283: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1283(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1284: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1284(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1285: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1285(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1286: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1286(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1287: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1287(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3330: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3330(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3331: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3331(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3332: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3332(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3333: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3333(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3334: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3334(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3335: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3335(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3336: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3336(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3337: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3337(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3344: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3344(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3345: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3345(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + 
case 3346: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3346(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.Schema = &TTableSchema{} + if err := p.Schema.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Schema), err) + } + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1281: ", err) +} else { + temp := TSparkRowSetType(v) + p.ResultFormat = &temp +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.Lz4Compressed = &v +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 1283: ", err) +} else { + p.ArrowSchema = v +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1284(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1284: ", err) +} else { + temp := TCacheLookupResult_(v) + p.CacheLookupResult_ = &temp +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1285(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1285: ", err) +} else { + p.UncompressedBytes = &v +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1286(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1286: ", err) +} else { + p.CompressedBytes = &v +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField1287(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1287: ", err) +} else { + p.IsStagingOperation = &v +} + return nil +} + +func (p *TGetResultSetMetadataResp) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + temp := TCloudFetchDisabledReason(v) + p.ReasonForNoCloudFetch = &temp +} + return nil +} + +func (p 
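Read is deliberately tolerant: field IDs it does not recognize, and known IDs arriving with an unexpected wire type, are skipped rather than treated as errors, and only fields marked required in the IDL (here just status, field 1) are enforced once the STOP marker is reached. A sketch of what a caller sees when a required field is missing, assuming iprot is an open input protocol:

    // Sketch: required-field enforcement happens at the end of Read.
    resp := NewTGetResultSetMetadataResp()
    if err := resp.Read(ctx, iprot); err != nil {
        // e.g. a protocol exception "Required field Status is not set"
        return fmt.Errorf("decoding TGetResultSetMetadataResp: %w", err)
    }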
+func (p *TGetResultSetMetadataResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Status = &TStatus{}
+  if err := p.Status.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err)
+  }
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Schema = &TTableSchema{}
+  if err := p.Schema.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Schema), err)
+  }
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI32(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1281: ", err)
+  }
+  temp := TSparkRowSetType(v)
+  p.ResultFormat = &temp
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBool(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1282: ", err)
+  }
+  p.Lz4Compressed = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBinary(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1283: ", err)
+  }
+  p.ArrowSchema = v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1284(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI32(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1284: ", err)
+  }
+  temp := TCacheLookupResult_(v)
+  p.CacheLookupResult_ = &temp
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1285(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI64(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1285: ", err)
+  }
+  p.UncompressedBytes = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1286(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI64(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1286: ", err)
+  }
+  p.CompressedBytes = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField1287(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBool(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 1287: ", err)
+  }
+  p.IsStagingOperation = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI32(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3329: ", err)
+  }
+  temp := TCloudFetchDisabledReason(v)
+  p.ReasonForNoCloudFetch = &temp
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3330(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*TDBSqlCloudResultFile, 0, size)
+  p.ResultFiles = tSlice
+  for i := 0; i < size; i++ {
+    _elem86 := &TDBSqlCloudResultFile{}
+    if err := _elem86.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem86), err)
+    }
+    p.ResultFiles = append(p.ResultFiles, _elem86)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3331(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadString(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3331: ", err)
+  }
+  p.ManifestFile = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3332(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI32(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3332: ", err)
+  }
+  temp := TDBSqlManifestFileFormat(v)
+  p.ManifestFileFormat = &temp
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3333(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI64(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3333: ", err)
+  }
+  p.CacheLookupLatency = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3334(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadString(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3334: ", err)
+  }
+  p.RemoteCacheMissReason = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3335(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI32(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3335: ", err)
+  }
+  temp := TDBSqlFetchDisposition(v)
+  p.FetchDisposition = &temp
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3336(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBool(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3336: ", err)
+  }
+  p.RemoteResultCacheEnabled = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3337(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBool(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3337: ", err)
+  }
+  p.IsServerless = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3344(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ResultDataFormat = &TDBSqlResultFormat{}
+  if err := p.ResultDataFormat.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ResultDataFormat), err)
+  }
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3345(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadBool(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3345: ", err)
+  }
+  p.TruncatedByThriftLimit = &v
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) ReadField3346(ctx context.Context, iprot thrift.TProtocol) error {
+  v, err := iprot.ReadI64(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading field 3346: ", err)
+  }
+  p.ResultByteLimit = &v
+  return nil
+}
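Fields 3329 through 3332 describe the driver's CloudFetch delivery path; their exact semantics are not visible in this hunk, so the following usage sketch only assumes that a non-empty resultFiles list means rows arrive as external cloud files rather than as inline Thrift batches:

    // Assumed semantics, based on the field names only.
    if files := resp.GetResultFiles(); len(files) > 0 {
        log.Printf("cloud fetch: %d result files, manifest=%q", len(files), resp.GetManifestFile())
    } else if resp.IsSetReasonForNoCloudFetch() {
        log.Printf("cloud fetch disabled: %v", resp.GetReasonForNoCloudFetch())
    }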
+func (p *TGetResultSetMetadataResp) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "TGetResultSetMetadataResp"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+  }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField1281(ctx, oprot); err != nil { return err }
+    if err := p.writeField1282(ctx, oprot); err != nil { return err }
+    if err := p.writeField1283(ctx, oprot); err != nil { return err }
+    if err := p.writeField1284(ctx, oprot); err != nil { return err }
+    if err := p.writeField1285(ctx, oprot); err != nil { return err }
+    if err := p.writeField1286(ctx, oprot); err != nil { return err }
+    if err := p.writeField1287(ctx, oprot); err != nil { return err }
+    if err := p.writeField3329(ctx, oprot); err != nil { return err }
+    if err := p.writeField3330(ctx, oprot); err != nil { return err }
+    if err := p.writeField3331(ctx, oprot); err != nil { return err }
+    if err := p.writeField3332(ctx, oprot); err != nil { return err }
+    if err := p.writeField3333(ctx, oprot); err != nil { return err }
+    if err := p.writeField3334(ctx, oprot); err != nil { return err }
+    if err := p.writeField3335(ctx, oprot); err != nil { return err }
+    if err := p.writeField3336(ctx, oprot); err != nil { return err }
+    if err := p.writeField3337(ctx, oprot); err != nil { return err }
+    if err := p.writeField3344(ctx, oprot); err != nil { return err }
+    if err := p.writeField3345(ctx, oprot); err != nil { return err }
+    if err := p.writeField3346(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err)
+  }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err)
+  }
+  return nil
+}
+
+func (p *TGetResultSetMetadataResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err)
+  }
+  if err := p.Status.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err)
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSchema() {
+    if err := oprot.WriteFieldBegin(ctx, "schema", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:schema: ", p), err)
+    }
+    if err := p.Schema.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Schema), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:schema: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "resultFormat", thrift.I32, 1281); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:resultFormat: ", p), err)
+    }
+    if err := oprot.WriteI32(ctx, int32(*p.ResultFormat)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.resultFormat (1281) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:resultFormat: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetLz4Compressed() {
+    if err := oprot.WriteFieldBegin(ctx, "lz4Compressed", thrift.BOOL, 1282); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:lz4Compressed: ", p), err)
+    }
+    if err := oprot.WriteBool(ctx, bool(*p.Lz4Compressed)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.lz4Compressed (1282) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:lz4Compressed: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1283(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetArrowSchema() {
+    if err := oprot.WriteFieldBegin(ctx, "arrowSchema", thrift.STRING, 1283); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1283:arrowSchema: ", p), err)
+    }
+    if err := oprot.WriteBinary(ctx, p.ArrowSchema); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.arrowSchema (1283) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1283:arrowSchema: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1284(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCacheLookupResult_() {
+    if err := oprot.WriteFieldBegin(ctx, "cacheLookupResult", thrift.I32, 1284); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1284:cacheLookupResult: ", p), err)
+    }
+    if err := oprot.WriteI32(ctx, int32(*p.CacheLookupResult_)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.cacheLookupResult (1284) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1284:cacheLookupResult: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1285(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetUncompressedBytes() {
+    if err := oprot.WriteFieldBegin(ctx, "uncompressedBytes", thrift.I64, 1285); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1285:uncompressedBytes: ", p), err)
+    }
+    if err := oprot.WriteI64(ctx, int64(*p.UncompressedBytes)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.uncompressedBytes (1285) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1285:uncompressedBytes: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1286(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCompressedBytes() {
+    if err := oprot.WriteFieldBegin(ctx, "compressedBytes", thrift.I64, 1286); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1286:compressedBytes: ", p), err)
+    }
+    if err := oprot.WriteI64(ctx, int64(*p.CompressedBytes)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.compressedBytes (1286) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1286:compressedBytes: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField1287(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIsStagingOperation() {
+    if err := oprot.WriteFieldBegin(ctx, "isStagingOperation", thrift.BOOL, 1287); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1287:isStagingOperation: ", p), err)
+    }
+    if err := oprot.WriteBool(ctx, bool(*p.IsStagingOperation)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.isStagingOperation (1287) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 1287:isStagingOperation: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetReasonForNoCloudFetch() {
+    if err := oprot.WriteFieldBegin(ctx, "reasonForNoCloudFetch", thrift.I32, 3329); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:reasonForNoCloudFetch: ", p), err)
+    }
+    if err := oprot.WriteI32(ctx, int32(*p.ReasonForNoCloudFetch)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.reasonForNoCloudFetch (3329) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:reasonForNoCloudFetch: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3330(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultFiles() {
+    if err := oprot.WriteFieldBegin(ctx, "resultFiles", thrift.LIST, 3330); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3330:resultFiles: ", p), err)
+    }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ResultFiles)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.ResultFiles {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3330:resultFiles: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3331(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetManifestFile() {
+    if err := oprot.WriteFieldBegin(ctx, "manifestFile", thrift.STRING, 3331); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3331:manifestFile: ", p), err)
+    }
+    if err := oprot.WriteString(ctx, string(*p.ManifestFile)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.manifestFile (3331) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3331:manifestFile: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3332(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetManifestFileFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "manifestFileFormat", thrift.I32, 3332); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3332:manifestFileFormat: ", p), err)
+    }
+    if err := oprot.WriteI32(ctx, int32(*p.ManifestFileFormat)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.manifestFileFormat (3332) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3332:manifestFileFormat: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3333(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetCacheLookupLatency() {
+    if err := oprot.WriteFieldBegin(ctx, "cacheLookupLatency", thrift.I64, 3333); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3333:cacheLookupLatency: ", p), err)
+    }
+    if err := oprot.WriteI64(ctx, int64(*p.CacheLookupLatency)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.cacheLookupLatency (3333) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3333:cacheLookupLatency: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3334(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRemoteCacheMissReason() {
+    if err := oprot.WriteFieldBegin(ctx, "remoteCacheMissReason", thrift.STRING, 3334); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3334:remoteCacheMissReason: ", p), err)
+    }
+    if err := oprot.WriteString(ctx, string(*p.RemoteCacheMissReason)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.remoteCacheMissReason (3334) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3334:remoteCacheMissReason: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3335(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetFetchDisposition() {
+    if err := oprot.WriteFieldBegin(ctx, "fetchDisposition", thrift.I32, 3335); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3335:fetchDisposition: ", p), err)
+    }
+    if err := oprot.WriteI32(ctx, int32(*p.FetchDisposition)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.fetchDisposition (3335) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3335:fetchDisposition: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3336(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRemoteResultCacheEnabled() {
+    if err := oprot.WriteFieldBegin(ctx, "remoteResultCacheEnabled", thrift.BOOL, 3336); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3336:remoteResultCacheEnabled: ", p), err)
+    }
+    if err := oprot.WriteBool(ctx, bool(*p.RemoteResultCacheEnabled)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.remoteResultCacheEnabled (3336) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3336:remoteResultCacheEnabled: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3337(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIsServerless() {
+    if err := oprot.WriteFieldBegin(ctx, "isServerless", thrift.BOOL, 3337); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3337:isServerless: ", p), err)
+    }
+    if err := oprot.WriteBool(ctx, bool(*p.IsServerless)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.isServerless (3337) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3337:isServerless: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3344(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultDataFormat() {
+    if err := oprot.WriteFieldBegin(ctx, "resultDataFormat", thrift.STRUCT, 3344); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3344:resultDataFormat: ", p), err)
+    }
+    if err := p.ResultDataFormat.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ResultDataFormat), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3344:resultDataFormat: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3345(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTruncatedByThriftLimit() {
+    if err := oprot.WriteFieldBegin(ctx, "truncatedByThriftLimit", thrift.BOOL, 3345); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3345:truncatedByThriftLimit: ", p), err)
+    }
+    if err := oprot.WriteBool(ctx, bool(*p.TruncatedByThriftLimit)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.truncatedByThriftLimit (3345) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3345:truncatedByThriftLimit: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) writeField3346(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetResultByteLimit() {
+    if err := oprot.WriteFieldBegin(ctx, "resultByteLimit", thrift.I64, 3346); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3346:resultByteLimit: ", p), err)
+    }
+    if err := oprot.WriteI64(ctx, int64(*p.ResultByteLimit)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.resultByteLimit (3346) field write error: ", p), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3346:resultByteLimit: ", p), err)
+    }
+  }
+  return err
+}
+
+func (p *TGetResultSetMetadataResp) Equals(other *TGetResultSetMetadataResp) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Status.Equals(other.Status) { return false }
+  if !p.Schema.Equals(other.Schema) { return false }
+  if p.ResultFormat != other.ResultFormat {
+    if p.ResultFormat == nil || other.ResultFormat == nil { return false }
+    if *p.ResultFormat != *other.ResultFormat { return false }
+  }
+  if p.Lz4Compressed != other.Lz4Compressed {
+    if p.Lz4Compressed == nil || other.Lz4Compressed == nil { return false }
+    if *p.Lz4Compressed != *other.Lz4Compressed { return false }
+  }
+  if bytes.Compare(p.ArrowSchema, other.ArrowSchema) != 0 { return false }
+  if p.CacheLookupResult_ != other.CacheLookupResult_ {
+    if p.CacheLookupResult_ == nil || other.CacheLookupResult_ == nil { return false }
+    if *p.CacheLookupResult_ != *other.CacheLookupResult_ { return false }
+  }
+  if p.UncompressedBytes != other.UncompressedBytes {
+    if p.UncompressedBytes == nil || other.UncompressedBytes == nil { return false }
+    if *p.UncompressedBytes != *other.UncompressedBytes { return false }
+  }
+  if p.CompressedBytes != other.CompressedBytes {
+    if p.CompressedBytes == nil || other.CompressedBytes == nil { return false }
+    if *p.CompressedBytes != *other.CompressedBytes { return false }
+  }
+  if p.IsStagingOperation != other.IsStagingOperation {
+    if p.IsStagingOperation == nil || other.IsStagingOperation == nil { return false }
+    if *p.IsStagingOperation != *other.IsStagingOperation { return false }
+  }
+  if p.ReasonForNoCloudFetch != other.ReasonForNoCloudFetch {
+    if p.ReasonForNoCloudFetch == nil || other.ReasonForNoCloudFetch == nil { return false }
+    if *p.ReasonForNoCloudFetch != *other.ReasonForNoCloudFetch { return false }
+  }
+  if len(p.ResultFiles) != len(other.ResultFiles) { return false }
+  for i, _tgt := range p.ResultFiles {
+    _src87 := other.ResultFiles[i]
+    if !_tgt.Equals(_src87) { return false }
+  }
+  if p.ManifestFile != other.ManifestFile {
+    if p.ManifestFile == nil || other.ManifestFile == nil { return false }
+    if *p.ManifestFile != *other.ManifestFile { return false }
+  }
+  if p.ManifestFileFormat != other.ManifestFileFormat {
+    if p.ManifestFileFormat == nil || other.ManifestFileFormat == nil { return false }
+    if *p.ManifestFileFormat != *other.ManifestFileFormat { return false }
+  }
+  if p.CacheLookupLatency != other.CacheLookupLatency {
+    if p.CacheLookupLatency == nil || other.CacheLookupLatency == nil { return false }
+    if *p.CacheLookupLatency != *other.CacheLookupLatency { return false }
+  }
+  if p.RemoteCacheMissReason != other.RemoteCacheMissReason {
+    if p.RemoteCacheMissReason == nil || other.RemoteCacheMissReason == nil { return false }
+    if *p.RemoteCacheMissReason != *other.RemoteCacheMissReason { return false }
+  }
+  if p.FetchDisposition != other.FetchDisposition {
+    if p.FetchDisposition == nil || other.FetchDisposition == nil { return false }
+    if *p.FetchDisposition != *other.FetchDisposition { return false }
+  }
+  if p.RemoteResultCacheEnabled != other.RemoteResultCacheEnabled {
+    if p.RemoteResultCacheEnabled == nil || other.RemoteResultCacheEnabled == nil { return false }
+    if *p.RemoteResultCacheEnabled != *other.RemoteResultCacheEnabled { return false }
+  }
+  if p.IsServerless != other.IsServerless {
+    if p.IsServerless == nil || other.IsServerless == nil { return false }
+    if *p.IsServerless != *other.IsServerless { return false }
+  }
+  if !p.ResultDataFormat.Equals(other.ResultDataFormat) { return false }
+  if p.TruncatedByThriftLimit != other.TruncatedByThriftLimit {
+    if p.TruncatedByThriftLimit == nil || other.TruncatedByThriftLimit == nil { return false }
+    if *p.TruncatedByThriftLimit != *other.TruncatedByThriftLimit { return false }
+  }
+  if p.ResultByteLimit != other.ResultByteLimit {
+    if p.ResultByteLimit == nil || other.ResultByteLimit == nil { return false }
+    if *p.ResultByteLimit != *other.ResultByteLimit { return false }
+  }
+  return true
+}
+
+func (p *TGetResultSetMetadataResp) String() string {
+  if p == nil { return "<nil>" }
+  return fmt.Sprintf("TGetResultSetMetadataResp(%+v)", *p)
+}
+
+func (p *TGetResultSetMetadataResp) Validate() error {
+  return nil
+}
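Field IDs in these structs are deliberately sparse. IDs 1 and 2 here (and 1 through 4 in the fetch request below) line up with the open-source HiveServer2 TCLIService definitions, while the Databricks-specific extensions sit in reserved blocks starting at 1281 and 3329; the `// unused fields` comments mark the gaps. Presumably this is what keeps the extended protocol wire-compatible with stock Thrift clients, which simply skip high-numbered fields they do not recognize, exactly as the generated Read loops above do.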
`thrift:"operationHandle,1,required" db:"operationHandle" json:"operationHandle"` + Orientation TFetchOrientation `thrift:"orientation,2,required" db:"orientation" json:"orientation"` + MaxRows int64 `thrift:"maxRows,3,required" db:"maxRows" json:"maxRows"` + FetchType int16 `thrift:"fetchType,4" db:"fetchType" json:"fetchType"` + // unused fields # 5 to 1280 + MaxBytes *int64 `thrift:"maxBytes,1281" db:"maxBytes" json:"maxBytes,omitempty"` + StartRowOffset *int64 `thrift:"startRowOffset,1282" db:"startRowOffset" json:"startRowOffset,omitempty"` + IncludeResultSetMetadata *bool `thrift:"includeResultSetMetadata,1283" db:"includeResultSetMetadata" json:"includeResultSetMetadata,omitempty"` +} + +func NewTFetchResultsReq() *TFetchResultsReq { + return &TFetchResultsReq{ +Orientation: 0, +} +} + +var TFetchResultsReq_OperationHandle_DEFAULT *TOperationHandle +func (p *TFetchResultsReq) GetOperationHandle() *TOperationHandle { + if !p.IsSetOperationHandle() { + return TFetchResultsReq_OperationHandle_DEFAULT + } +return p.OperationHandle +} + +func (p *TFetchResultsReq) GetOrientation() TFetchOrientation { + return p.Orientation +} + +func (p *TFetchResultsReq) GetMaxRows() int64 { + return p.MaxRows +} +var TFetchResultsReq_FetchType_DEFAULT int16 = 0 + +func (p *TFetchResultsReq) GetFetchType() int16 { + return p.FetchType +} +var TFetchResultsReq_MaxBytes_DEFAULT int64 +func (p *TFetchResultsReq) GetMaxBytes() int64 { + if !p.IsSetMaxBytes() { + return TFetchResultsReq_MaxBytes_DEFAULT + } +return *p.MaxBytes +} +var TFetchResultsReq_StartRowOffset_DEFAULT int64 +func (p *TFetchResultsReq) GetStartRowOffset() int64 { + if !p.IsSetStartRowOffset() { + return TFetchResultsReq_StartRowOffset_DEFAULT + } +return *p.StartRowOffset +} +var TFetchResultsReq_IncludeResultSetMetadata_DEFAULT bool +func (p *TFetchResultsReq) GetIncludeResultSetMetadata() bool { + if !p.IsSetIncludeResultSetMetadata() { + return TFetchResultsReq_IncludeResultSetMetadata_DEFAULT + } +return *p.IncludeResultSetMetadata +} +func (p *TFetchResultsReq) IsSetOperationHandle() bool { + return p.OperationHandle != nil +} + +func (p *TFetchResultsReq) IsSetFetchType() bool { + return p.FetchType != TFetchResultsReq_FetchType_DEFAULT +} + +func (p *TFetchResultsReq) IsSetMaxBytes() bool { + return p.MaxBytes != nil +} + +func (p *TFetchResultsReq) IsSetStartRowOffset() bool { + return p.StartRowOffset != nil +} + +func (p *TFetchResultsReq) IsSetIncludeResultSetMetadata() bool { + return p.IncludeResultSetMetadata != nil +} + +func (p *TFetchResultsReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperationHandle bool = false; + var issetOrientation bool = false; + var issetMaxRows bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperationHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetOrientation = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + 
} + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetMaxRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I16 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1282: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1282(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1283: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1283(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperationHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationHandle is not set")); + } + if !issetOrientation{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Orientation is not set")); + } + if !issetMaxRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxRows is not set")); + } + return nil +} + +func (p *TFetchResultsReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.OperationHandle = &TOperationHandle{} + if err := p.OperationHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationHandle), err) + } + return nil +} + +func (p *TFetchResultsReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TFetchOrientation(v) + p.Orientation = temp +} + return nil +} + +func (p *TFetchResultsReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.MaxRows = v +} + return nil +} + +func (p *TFetchResultsReq) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.FetchType = v +} + return nil +} + +func (p *TFetchResultsReq) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1281: ", err) +} else { + p.MaxBytes = &v +} + return nil +} + +func (p *TFetchResultsReq) ReadField1282(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1282: ", err) +} else { + p.StartRowOffset = &v +} + return nil +} + +func (p *TFetchResultsReq) ReadField1283(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + 
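All three required fields (operationHandle, orientation, maxRows) must be populated by the caller before the request is written; Validate is generated as a no-op and Write performs no isset checks of its own. A usage sketch under the same assumptions as the earlier ones:

    req := NewTFetchResultsReq()   // Orientation defaults to 0, which is FETCH_NEXT
                                   // in the standard TCLIService enum (an assumption
                                   // here; the enum is outside this hunk)
    req.OperationHandle = opHandle // required, field 1
    req.MaxRows = 10000            // required, field 3
    includeMeta := true
    req.IncludeResultSetMetadata = &includeMeta // Databricks extension, field 1283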
return thrift.PrependError("error reading field 1283: ", err) +} else { + p.IncludeResultSetMetadata = &v +} + return nil +} + +func (p *TFetchResultsReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TFetchResultsReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField1282(ctx, oprot); err != nil { return err } + if err := p.writeField1283(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TFetchResultsReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operationHandle: ", p), err) } + if err := p.OperationHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operationHandle: ", p), err) } + return err +} + +func (p *TFetchResultsReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "orientation", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:orientation: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Orientation)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.orientation (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:orientation: ", p), err) } + return err +} + +func (p *TFetchResultsReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "maxRows", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:maxRows: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.MaxRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxRows (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:maxRows: ", p), err) } + return err +} + +func (p *TFetchResultsReq) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetFetchType() { + if err := oprot.WriteFieldBegin(ctx, "fetchType", thrift.I16, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:fetchType: ", p), err) } + if err := oprot.WriteI16(ctx, int16(p.FetchType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fetchType (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:fetchType: ", p), err) } + } + return err +} + 
+func (p *TFetchResultsReq) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetMaxBytes() { + if err := oprot.WriteFieldBegin(ctx, "maxBytes", thrift.I64, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:maxBytes: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.MaxBytes)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxBytes (1281) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:maxBytes: ", p), err) } + } + return err +} + +func (p *TFetchResultsReq) writeField1282(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStartRowOffset() { + if err := oprot.WriteFieldBegin(ctx, "startRowOffset", thrift.I64, 1282); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1282:startRowOffset: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.StartRowOffset)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startRowOffset (1282) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1282:startRowOffset: ", p), err) } + } + return err +} + +func (p *TFetchResultsReq) writeField1283(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIncludeResultSetMetadata() { + if err := oprot.WriteFieldBegin(ctx, "includeResultSetMetadata", thrift.BOOL, 1283); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1283:includeResultSetMetadata: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.IncludeResultSetMetadata)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.includeResultSetMetadata (1283) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1283:includeResultSetMetadata: ", p), err) } + } + return err +} + +func (p *TFetchResultsReq) Equals(other *TFetchResultsReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.OperationHandle.Equals(other.OperationHandle) { return false } + if p.Orientation != other.Orientation { return false } + if p.MaxRows != other.MaxRows { return false } + if p.FetchType != other.FetchType { return false } + if p.MaxBytes != other.MaxBytes { + if p.MaxBytes == nil || other.MaxBytes == nil { + return false + } + if (*p.MaxBytes) != (*other.MaxBytes) { return false } + } + if p.StartRowOffset != other.StartRowOffset { + if p.StartRowOffset == nil || other.StartRowOffset == nil { + return false + } + if (*p.StartRowOffset) != (*other.StartRowOffset) { return false } + } + if p.IncludeResultSetMetadata != other.IncludeResultSetMetadata { + if p.IncludeResultSetMetadata == nil || other.IncludeResultSetMetadata == nil { + return false + } + if (*p.IncludeResultSetMetadata) != (*other.IncludeResultSetMetadata) { return false } + } + return true +} + +func (p *TFetchResultsReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFetchResultsReq(%+v)", *p) +} + +func (p *TFetchResultsReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - HasMoreRows +// - Results +// - ResultSetMetadata +// - ResponseValidation +type TFetchResultsResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + HasMoreRows *bool `thrift:"hasMoreRows,2" db:"hasMoreRows" 
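TFetchResultsResp (below) closes the loop: hasMoreRows presumably drives pagination, and resultSetMetadata lets the server piggyback a TGetResultSetMetadataResp when includeResultSetMetadata was set on the request. A hypothetical pagination loop; client.FetchResults stands in for the generated service method, which is outside this hunk:

    for {
        resp, err := client.FetchResults(ctx, req)
        if err != nil {
            return err
        }
        process(resp.GetResults()) // process is application code
        if !resp.GetHasMoreRows() {
            break
        }
    }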
json:"hasMoreRows,omitempty"` + Results *TRowSet `thrift:"results,3" db:"results" json:"results,omitempty"` + // unused fields # 4 to 1280 + ResultSetMetadata *TGetResultSetMetadataResp `thrift:"resultSetMetadata,1281" db:"resultSetMetadata" json:"resultSetMetadata,omitempty"` + // unused fields # 1282 to 3328 + ResponseValidation []byte `thrift:"responseValidation,3329" db:"responseValidation" json:"responseValidation,omitempty"` +} + +func NewTFetchResultsResp() *TFetchResultsResp { + return &TFetchResultsResp{} +} + +var TFetchResultsResp_Status_DEFAULT *TStatus +func (p *TFetchResultsResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TFetchResultsResp_Status_DEFAULT + } +return p.Status +} +var TFetchResultsResp_HasMoreRows_DEFAULT bool +func (p *TFetchResultsResp) GetHasMoreRows() bool { + if !p.IsSetHasMoreRows() { + return TFetchResultsResp_HasMoreRows_DEFAULT + } +return *p.HasMoreRows +} +var TFetchResultsResp_Results_DEFAULT *TRowSet +func (p *TFetchResultsResp) GetResults() *TRowSet { + if !p.IsSetResults() { + return TFetchResultsResp_Results_DEFAULT + } +return p.Results +} +var TFetchResultsResp_ResultSetMetadata_DEFAULT *TGetResultSetMetadataResp +func (p *TFetchResultsResp) GetResultSetMetadata() *TGetResultSetMetadataResp { + if !p.IsSetResultSetMetadata() { + return TFetchResultsResp_ResultSetMetadata_DEFAULT + } +return p.ResultSetMetadata +} +var TFetchResultsResp_ResponseValidation_DEFAULT []byte + +func (p *TFetchResultsResp) GetResponseValidation() []byte { + return p.ResponseValidation +} +func (p *TFetchResultsResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TFetchResultsResp) IsSetHasMoreRows() bool { + return p.HasMoreRows != nil +} + +func (p *TFetchResultsResp) IsSetResults() bool { + return p.Results != nil +} + +func (p *TFetchResultsResp) IsSetResultSetMetadata() bool { + return p.ResultSetMetadata != nil +} + +func (p *TFetchResultsResp) IsSetResponseValidation() bool { + return p.ResponseValidation != nil +} + +func (p *TFetchResultsResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 1281: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1281(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err 
:= iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TFetchResultsResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TFetchResultsResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.HasMoreRows = &v +} + return nil +} + +func (p *TFetchResultsResp) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Results = &TRowSet{} + if err := p.Results.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Results), err) + } + return nil +} + +func (p *TFetchResultsResp) ReadField1281(ctx context.Context, iprot thrift.TProtocol) error { + p.ResultSetMetadata = &TGetResultSetMetadataResp{} + if err := p.ResultSetMetadata.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ResultSetMetadata), err) + } + return nil +} + +func (p *TFetchResultsResp) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 3329: ", err) +} else { + p.ResponseValidation = v +} + return nil +} + +func (p *TFetchResultsResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TFetchResultsResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField1281(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TFetchResultsResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TFetchResultsResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHasMoreRows() { + if err := oprot.WriteFieldBegin(ctx, "hasMoreRows", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:hasMoreRows: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.HasMoreRows)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hasMoreRows (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hasMoreRows: ", p), err) } + } + return err +} + +func (p *TFetchResultsResp) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetResults() { + if err := oprot.WriteFieldBegin(ctx, "results", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:results: ", p), err) } + if err := p.Results.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Results), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:results: ", p), err) } + } + return err +} + +func (p *TFetchResultsResp) writeField1281(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetResultSetMetadata() { + if err := oprot.WriteFieldBegin(ctx, "resultSetMetadata", thrift.STRUCT, 1281); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1281:resultSetMetadata: ", p), err) } + if err := p.ResultSetMetadata.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ResultSetMetadata), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1281:resultSetMetadata: ", p), err) } + } + return err +} + +func (p *TFetchResultsResp) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetResponseValidation() { + if err := oprot.WriteFieldBegin(ctx, "responseValidation", thrift.STRING, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:responseValidation: ", p), err) } + if err := oprot.WriteBinary(ctx, p.ResponseValidation); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.responseValidation (3329) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:responseValidation: ", p), err) } + } + return err +} + +func (p *TFetchResultsResp) Equals(other *TFetchResultsResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if p.HasMoreRows != other.HasMoreRows { + if p.HasMoreRows == nil || other.HasMoreRows == nil { + return false + } + if (*p.HasMoreRows) != (*other.HasMoreRows) { return false } + } + if !p.Results.Equals(other.Results) { return false } + if !p.ResultSetMetadata.Equals(other.ResultSetMetadata) { return false } + if bytes.Compare(p.ResponseValidation, other.ResponseValidation) != 0 { return false } + return true +} + +func (p *TFetchResultsResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TFetchResultsResp(%+v)", *p) +} + +func (p *TFetchResultsResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - Owner +// - Renewer +// - SessionConf +type TGetDelegationTokenReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + Owner string `thrift:"owner,2,required" db:"owner" json:"owner"` + Renewer string `thrift:"renewer,3,required" db:"renewer" json:"renewer"` + // 
unused fields # 4 to 3328 + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3329" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTGetDelegationTokenReq() *TGetDelegationTokenReq { + return &TGetDelegationTokenReq{} +} + +var TGetDelegationTokenReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TGetDelegationTokenReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TGetDelegationTokenReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} + +func (p *TGetDelegationTokenReq) GetOwner() string { + return p.Owner +} + +func (p *TGetDelegationTokenReq) GetRenewer() string { + return p.Renewer +} +var TGetDelegationTokenReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TGetDelegationTokenReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TGetDelegationTokenReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TGetDelegationTokenReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TGetDelegationTokenReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TGetDelegationTokenReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + var issetOwner bool = false; + var issetRenewer bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetOwner = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetRenewer = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + if !issetOwner{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Owner is not set")); + } + if !issetRenewer{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Renewer is not set")); + } + return nil +} + +func (p *TGetDelegationTokenReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", 
p.SessionHandle), err) + } + return nil +} + +func (p *TGetDelegationTokenReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Owner = v +} + return nil +} + +func (p *TGetDelegationTokenReq) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.Renewer = v +} + return nil +} + +func (p *TGetDelegationTokenReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TGetDelegationTokenReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetDelegationTokenReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetDelegationTokenReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TGetDelegationTokenReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "owner", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:owner: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Owner)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.owner (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:owner: ", p), err) } + return err +} + +func (p *TGetDelegationTokenReq) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "renewer", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:renewer: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Renewer)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.renewer (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:renewer: ", p), err) } + return err +} + +func (p *TGetDelegationTokenReq) writeField3329(ctx context.Context, oprot 
thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:sessionConf: ", p), err) } + } + return err +} + +func (p *TGetDelegationTokenReq) Equals(other *TGetDelegationTokenReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.Owner != other.Owner { return false } + if p.Renewer != other.Renewer { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TGetDelegationTokenReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetDelegationTokenReq(%+v)", *p) +} + +func (p *TGetDelegationTokenReq) Validate() error { + return nil +} +// Attributes: +// - Status +// - DelegationToken +type TGetDelegationTokenResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` + DelegationToken *string `thrift:"delegationToken,2" db:"delegationToken" json:"delegationToken,omitempty"` +} + +func NewTGetDelegationTokenResp() *TGetDelegationTokenResp { + return &TGetDelegationTokenResp{} +} + +var TGetDelegationTokenResp_Status_DEFAULT *TStatus +func (p *TGetDelegationTokenResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TGetDelegationTokenResp_Status_DEFAULT + } +return p.Status +} +var TGetDelegationTokenResp_DelegationToken_DEFAULT string +func (p *TGetDelegationTokenResp) GetDelegationToken() string { + if !p.IsSetDelegationToken() { + return TGetDelegationTokenResp_DelegationToken_DEFAULT + } +return *p.DelegationToken +} +func (p *TGetDelegationTokenResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TGetDelegationTokenResp) IsSetDelegationToken() bool { + return p.DelegationToken != nil +} + +func (p *TGetDelegationTokenResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field 
Status is not set")); + } + return nil +} + +func (p *TGetDelegationTokenResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TGetDelegationTokenResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.DelegationToken = &v +} + return nil +} + +func (p *TGetDelegationTokenResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TGetDelegationTokenResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TGetDelegationTokenResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TGetDelegationTokenResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDelegationToken() { + if err := oprot.WriteFieldBegin(ctx, "delegationToken", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:delegationToken: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.DelegationToken)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.delegationToken (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:delegationToken: ", p), err) } + } + return err +} + +func (p *TGetDelegationTokenResp) Equals(other *TGetDelegationTokenResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + if p.DelegationToken != other.DelegationToken { + if p.DelegationToken == nil || other.DelegationToken == nil { + return false + } + if (*p.DelegationToken) != (*other.DelegationToken) { return false } + } + return true +} + +func (p *TGetDelegationTokenResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TGetDelegationTokenResp(%+v)", *p) +} + +func (p *TGetDelegationTokenResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - DelegationToken +// - SessionConf +type TCancelDelegationTokenReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + DelegationToken string `thrift:"delegationToken,2,required" db:"delegationToken" json:"delegationToken"` + // unused fields # 3 to 3328 + 
SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3329" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTCancelDelegationTokenReq() *TCancelDelegationTokenReq { + return &TCancelDelegationTokenReq{} +} + +var TCancelDelegationTokenReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TCancelDelegationTokenReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TCancelDelegationTokenReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} + +func (p *TCancelDelegationTokenReq) GetDelegationToken() string { + return p.DelegationToken +} +var TCancelDelegationTokenReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TCancelDelegationTokenReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TCancelDelegationTokenReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TCancelDelegationTokenReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TCancelDelegationTokenReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TCancelDelegationTokenReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + var issetDelegationToken bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetDelegationToken = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + if !issetDelegationToken{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DelegationToken is not set")); + } + return nil +} + +func (p *TCancelDelegationTokenReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionHandle), err) + } + return nil +} + +func (p *TCancelDelegationTokenReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.DelegationToken = v +} + return nil +} + +func (p *TCancelDelegationTokenReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) 
error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TCancelDelegationTokenReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCancelDelegationTokenReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCancelDelegationTokenReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TCancelDelegationTokenReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "delegationToken", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:delegationToken: ", p), err) } + if err := oprot.WriteString(ctx, string(p.DelegationToken)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.delegationToken (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:delegationToken: ", p), err) } + return err +} + +func (p *TCancelDelegationTokenReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:sessionConf: ", p), err) } + } + return err +} + +func (p *TCancelDelegationTokenReq) Equals(other *TCancelDelegationTokenReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.DelegationToken != other.DelegationToken { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TCancelDelegationTokenReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCancelDelegationTokenReq(%+v)", *p) +} + +func (p *TCancelDelegationTokenReq) Validate() error { + return nil +} +// Attributes: +// - Status +type TCancelDelegationTokenResp struct { + 
Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` +} + +func NewTCancelDelegationTokenResp() *TCancelDelegationTokenResp { + return &TCancelDelegationTokenResp{} +} + +var TCancelDelegationTokenResp_Status_DEFAULT *TStatus +func (p *TCancelDelegationTokenResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TCancelDelegationTokenResp_Status_DEFAULT + } +return p.Status +} +func (p *TCancelDelegationTokenResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TCancelDelegationTokenResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TCancelDelegationTokenResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TCancelDelegationTokenResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TCancelDelegationTokenResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCancelDelegationTokenResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p *TCancelDelegationTokenResp) Equals(other *TCancelDelegationTokenResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + return true +} + +func (p *TCancelDelegationTokenResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCancelDelegationTokenResp(%+v)", *p) +} + +func (p 
*TCancelDelegationTokenResp) Validate() error { + return nil +} +// Attributes: +// - SessionHandle +// - DelegationToken +// - SessionConf +type TRenewDelegationTokenReq struct { + SessionHandle *TSessionHandle `thrift:"sessionHandle,1,required" db:"sessionHandle" json:"sessionHandle"` + DelegationToken string `thrift:"delegationToken,2,required" db:"delegationToken" json:"delegationToken"` + // unused fields # 3 to 3328 + SessionConf *TDBSqlSessionConf `thrift:"sessionConf,3329" db:"sessionConf" json:"sessionConf,omitempty"` +} + +func NewTRenewDelegationTokenReq() *TRenewDelegationTokenReq { + return &TRenewDelegationTokenReq{} +} + +var TRenewDelegationTokenReq_SessionHandle_DEFAULT *TSessionHandle +func (p *TRenewDelegationTokenReq) GetSessionHandle() *TSessionHandle { + if !p.IsSetSessionHandle() { + return TRenewDelegationTokenReq_SessionHandle_DEFAULT + } +return p.SessionHandle +} + +func (p *TRenewDelegationTokenReq) GetDelegationToken() string { + return p.DelegationToken +} +var TRenewDelegationTokenReq_SessionConf_DEFAULT *TDBSqlSessionConf +func (p *TRenewDelegationTokenReq) GetSessionConf() *TDBSqlSessionConf { + if !p.IsSetSessionConf() { + return TRenewDelegationTokenReq_SessionConf_DEFAULT + } +return p.SessionConf +} +func (p *TRenewDelegationTokenReq) IsSetSessionHandle() bool { + return p.SessionHandle != nil +} + +func (p *TRenewDelegationTokenReq) IsSetSessionConf() bool { + return p.SessionConf != nil +} + +func (p *TRenewDelegationTokenReq) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSessionHandle bool = false; + var issetDelegationToken bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSessionHandle = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetDelegationToken = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3329: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3329(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSessionHandle{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SessionHandle is not set")); + } + if !issetDelegationToken{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DelegationToken is not set")); + } + return nil +} + +func (p *TRenewDelegationTokenReq) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionHandle = &TSessionHandle{} + if err := p.SessionHandle.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", 
p.SessionHandle), err) + } + return nil +} + +func (p *TRenewDelegationTokenReq) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.DelegationToken = v +} + return nil +} + +func (p *TRenewDelegationTokenReq) ReadField3329(ctx context.Context, iprot thrift.TProtocol) error { + p.SessionConf = &TDBSqlSessionConf{} + if err := p.SessionConf.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SessionConf), err) + } + return nil +} + +func (p *TRenewDelegationTokenReq) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TRenewDelegationTokenReq"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3329(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TRenewDelegationTokenReq) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "sessionHandle", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:sessionHandle: ", p), err) } + if err := p.SessionHandle.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionHandle), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:sessionHandle: ", p), err) } + return err +} + +func (p *TRenewDelegationTokenReq) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "delegationToken", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:delegationToken: ", p), err) } + if err := oprot.WriteString(ctx, string(p.DelegationToken)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.delegationToken (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:delegationToken: ", p), err) } + return err +} + +func (p *TRenewDelegationTokenReq) writeField3329(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSessionConf() { + if err := oprot.WriteFieldBegin(ctx, "sessionConf", thrift.STRUCT, 3329); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3329:sessionConf: ", p), err) } + if err := p.SessionConf.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SessionConf), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3329:sessionConf: ", p), err) } + } + return err +} + +func (p *TRenewDelegationTokenReq) Equals(other *TRenewDelegationTokenReq) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.SessionHandle.Equals(other.SessionHandle) { return false } + if p.DelegationToken != 
other.DelegationToken { return false } + if !p.SessionConf.Equals(other.SessionConf) { return false } + return true +} + +func (p *TRenewDelegationTokenReq) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRenewDelegationTokenReq(%+v)", *p) +} + +func (p *TRenewDelegationTokenReq) Validate() error { + return nil +} +// Attributes: +// - Status +type TRenewDelegationTokenResp struct { + Status *TStatus `thrift:"status,1,required" db:"status" json:"status"` +} + +func NewTRenewDelegationTokenResp() *TRenewDelegationTokenResp { + return &TRenewDelegationTokenResp{} +} + +var TRenewDelegationTokenResp_Status_DEFAULT *TStatus +func (p *TRenewDelegationTokenResp) GetStatus() *TStatus { + if !p.IsSetStatus() { + return TRenewDelegationTokenResp_Status_DEFAULT + } +return p.Status +} +func (p *TRenewDelegationTokenResp) IsSetStatus() bool { + return p.Status != nil +} + +func (p *TRenewDelegationTokenResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStatus bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + return nil +} + +func (p *TRenewDelegationTokenResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Status = &TStatus{} + if err := p.Status.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Status), err) + } + return nil +} + +func (p *TRenewDelegationTokenResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TRenewDelegationTokenResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TRenewDelegationTokenResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) } + if err := p.Status.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Status), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) } + return err +} + +func (p 
*TRenewDelegationTokenResp) Equals(other *TRenewDelegationTokenResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Status.Equals(other.Status) { return false } + return true +} + +func (p *TRenewDelegationTokenResp) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TRenewDelegationTokenResp(%+v)", *p) +} + +func (p *TRenewDelegationTokenResp) Validate() error { + return nil +} +// Attributes: +// - HeaderNames +// - Rows +// - ProgressedPercentage +// - Status +// - FooterSummary +// - StartTime +type TProgressUpdateResp struct { + HeaderNames []string `thrift:"headerNames,1,required" db:"headerNames" json:"headerNames"` + Rows [][]string `thrift:"rows,2,required" db:"rows" json:"rows"` + ProgressedPercentage float64 `thrift:"progressedPercentage,3,required" db:"progressedPercentage" json:"progressedPercentage"` + Status TJobExecutionStatus `thrift:"status,4,required" db:"status" json:"status"` + FooterSummary string `thrift:"footerSummary,5,required" db:"footerSummary" json:"footerSummary"` + StartTime int64 `thrift:"startTime,6,required" db:"startTime" json:"startTime"` +} + +func NewTProgressUpdateResp() *TProgressUpdateResp { + return &TProgressUpdateResp{} +} + + +func (p *TProgressUpdateResp) GetHeaderNames() []string { + return p.HeaderNames +} + +func (p *TProgressUpdateResp) GetRows() [][]string { + return p.Rows +} + +func (p *TProgressUpdateResp) GetProgressedPercentage() float64 { + return p.ProgressedPercentage +} + +func (p *TProgressUpdateResp) GetStatus() TJobExecutionStatus { + return p.Status +} + +func (p *TProgressUpdateResp) GetFooterSummary() string { + return p.FooterSummary +} + +func (p *TProgressUpdateResp) GetStartTime() int64 { + return p.StartTime +} +func (p *TProgressUpdateResp) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetHeaderNames bool = false; + var issetRows bool = false; + var issetProgressedPercentage bool = false; + var issetStatus bool = false; + var issetFooterSummary bool = false; + var issetStartTime bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetHeaderNames = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetRows = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetProgressedPercentage = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetStatus = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + issetFooterSummary = true + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + issetStartTime = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetHeaderNames{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field HeaderNames is not set")); + } + if !issetRows{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Rows is not set")); + } + if !issetProgressedPercentage{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProgressedPercentage is not set")); + } + if !issetStatus{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Status is not set")); + } + if !issetFooterSummary{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FooterSummary is not set")); + } + if !issetStartTime{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")); + } + return nil +} + +func (p *TProgressUpdateResp) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.HeaderNames = tSlice + for i := 0; i < size; i ++ { +var _elem88 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem88 = v +} + p.HeaderNames = append(p.HeaderNames, _elem88) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TProgressUpdateResp) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]string, 0, size) + p.Rows = tSlice + for i := 0; i < size; i ++ { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + _elem89 := tSlice + for i := 0; i < size; i ++ { +var _elem90 string + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem90 = v +} + _elem89 = append(_elem89, _elem90) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + p.Rows = append(p.Rows, _elem89) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *TProgressUpdateResp) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.ProgressedPercentage = v +} + return nil +} + +func (p *TProgressUpdateResp) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { 
+ return thrift.PrependError("error reading field 4: ", err) +} else { + temp := TJobExecutionStatus(v) + p.Status = temp +} + return nil +} + +func (p *TProgressUpdateResp) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.FooterSummary = v +} + return nil +} + +func (p *TProgressUpdateResp) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.StartTime = v +} + return nil +} + +func (p *TProgressUpdateResp) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "TProgressUpdateResp"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TProgressUpdateResp) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "headerNames", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:headerNames: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.HeaderNames)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HeaderNames { + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:headerNames: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "rows", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:rows: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.LIST, len(p.Rows)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Rows { + if err := oprot.WriteListBegin(ctx, thrift.STRING, len(v)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range v { + if err := oprot.WriteString(ctx, string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:rows: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "progressedPercentage", thrift.DOUBLE, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progressedPercentage: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(p.ProgressedPercentage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progressedPercentage (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progressedPercentage: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "status", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:status: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:status: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "footerSummary", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:footerSummary: ", p), err) } + if err := oprot.WriteString(ctx, string(p.FooterSummary)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.footerSummary (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:footerSummary: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:startTime: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startTime (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:startTime: ", p), err) } + return err +} + +func (p *TProgressUpdateResp) Equals(other *TProgressUpdateResp) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if len(p.HeaderNames) != len(other.HeaderNames) { return false } + for i, _tgt := range p.HeaderNames { + _src91 := other.HeaderNames[i] + if _tgt != _src91 { return false } + } + if len(p.Rows) != len(other.Rows) { return false } + for i, _tgt := range p.Rows { + _src92 := other.Rows[i] + if len(_tgt) != len(_src92) { return false } + for i, _tgt := range _tgt { + _src93 := _src92[i] + if _tgt != _src93 { return false } + } + } + if p.ProgressedPercentage != 
other.ProgressedPercentage { return false }
+ if p.Status != other.Status { return false }
+ if p.FooterSummary != other.FooterSummary { return false }
+ if p.StartTime != other.StartTime { return false }
+ return true
+}
+
+func (p *TProgressUpdateResp) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("TProgressUpdateResp(%+v)", *p)
+}
+
+func (p *TProgressUpdateResp) Validate() error {
+ return nil
+}
+type TCLIService interface {
+ // Parameters:
+ // - Req
+ OpenSession(ctx context.Context, req *TOpenSessionReq) (_r *TOpenSessionResp, _err error)
+ // Parameters:
+ // - Req
+ CloseSession(ctx context.Context, req *TCloseSessionReq) (_r *TCloseSessionResp, _err error)
+ // Parameters:
+ // - Req
+ GetInfo(ctx context.Context, req *TGetInfoReq) (_r *TGetInfoResp, _err error)
+ // Parameters:
+ // - Req
+ ExecuteStatement(ctx context.Context, req *TExecuteStatementReq) (_r *TExecuteStatementResp, _err error)
+ // Parameters:
+ // - Req
+ GetTypeInfo(ctx context.Context, req *TGetTypeInfoReq) (_r *TGetTypeInfoResp, _err error)
+ // Parameters:
+ // - Req
+ GetCatalogs(ctx context.Context, req *TGetCatalogsReq) (_r *TGetCatalogsResp, _err error)
+ // Parameters:
+ // - Req
+ GetSchemas(ctx context.Context, req *TGetSchemasReq) (_r *TGetSchemasResp, _err error)
+ // Parameters:
+ // - Req
+ GetTables(ctx context.Context, req *TGetTablesReq) (_r *TGetTablesResp, _err error)
+ // Parameters:
+ // - Req
+ GetTableTypes(ctx context.Context, req *TGetTableTypesReq) (_r *TGetTableTypesResp, _err error)
+ // Parameters:
+ // - Req
+ GetColumns(ctx context.Context, req *TGetColumnsReq) (_r *TGetColumnsResp, _err error)
+ // Parameters:
+ // - Req
+ GetFunctions(ctx context.Context, req *TGetFunctionsReq) (_r *TGetFunctionsResp, _err error)
+ // Parameters:
+ // - Req
+ GetPrimaryKeys(ctx context.Context, req *TGetPrimaryKeysReq) (_r *TGetPrimaryKeysResp, _err error)
+ // Parameters:
+ // - Req
+ GetCrossReference(ctx context.Context, req *TGetCrossReferenceReq) (_r *TGetCrossReferenceResp, _err error)
+ // Parameters:
+ // - Req
+ GetOperationStatus(ctx context.Context, req *TGetOperationStatusReq) (_r *TGetOperationStatusResp, _err error)
+ // Parameters:
+ // - Req
+ CancelOperation(ctx context.Context, req *TCancelOperationReq) (_r *TCancelOperationResp, _err error)
+ // Parameters:
+ // - Req
+ CloseOperation(ctx context.Context, req *TCloseOperationReq) (_r *TCloseOperationResp, _err error)
+ // Parameters:
+ // - Req
+ GetResultSetMetadata(ctx context.Context, req *TGetResultSetMetadataReq) (_r *TGetResultSetMetadataResp, _err error)
+ // Parameters:
+ // - Req
+ FetchResults(ctx context.Context, req *TFetchResultsReq) (_r *TFetchResultsResp, _err error)
+ // Parameters:
+ // - Req
+ GetDelegationToken(ctx context.Context, req *TGetDelegationTokenReq) (_r *TGetDelegationTokenResp, _err error)
+ // Parameters:
+ // - Req
+ CancelDelegationToken(ctx context.Context, req *TCancelDelegationTokenReq) (_r *TCancelDelegationTokenResp, _err error)
+ // Parameters:
+ // - Req
+ RenewDelegationToken(ctx context.Context, req *TRenewDelegationTokenReq) (_r *TRenewDelegationTokenResp, _err error)
+}
+
+type TCLIServiceClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewTCLIServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *TCLIServiceClient {
+ return &TCLIServiceClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewTCLIServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot
thrift.TProtocol) *TCLIServiceClient { + return &TCLIServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewTCLIServiceClient(c thrift.TClient) *TCLIServiceClient { + return &TCLIServiceClient{ + c: c, + } +} + +func (p *TCLIServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *TCLIServiceClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *TCLIServiceClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) OpenSession(ctx context.Context, req *TOpenSessionReq) (_r *TOpenSessionResp, _err error) { + var _args94 TCLIServiceOpenSessionArgs + _args94.Req = req + var _result96 TCLIServiceOpenSessionResult + var _meta95 thrift.ResponseMeta + _meta95, _err = p.Client_().Call(ctx, "OpenSession", &_args94, &_result96) + p.SetLastResponseMeta_(_meta95) + if _err != nil { + return + } + if _ret97 := _result96.GetSuccess(); _ret97 != nil { + return _ret97, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "OpenSession failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) CloseSession(ctx context.Context, req *TCloseSessionReq) (_r *TCloseSessionResp, _err error) { + var _args98 TCLIServiceCloseSessionArgs + _args98.Req = req + var _result100 TCLIServiceCloseSessionResult + var _meta99 thrift.ResponseMeta + _meta99, _err = p.Client_().Call(ctx, "CloseSession", &_args98, &_result100) + p.SetLastResponseMeta_(_meta99) + if _err != nil { + return + } + if _ret101 := _result100.GetSuccess(); _ret101 != nil { + return _ret101, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "CloseSession failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetInfo(ctx context.Context, req *TGetInfoReq) (_r *TGetInfoResp, _err error) { + var _args102 TCLIServiceGetInfoArgs + _args102.Req = req + var _result104 TCLIServiceGetInfoResult + var _meta103 thrift.ResponseMeta + _meta103, _err = p.Client_().Call(ctx, "GetInfo", &_args102, &_result104) + p.SetLastResponseMeta_(_meta103) + if _err != nil { + return + } + if _ret105 := _result104.GetSuccess(); _ret105 != nil { + return _ret105, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetInfo failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) ExecuteStatement(ctx context.Context, req *TExecuteStatementReq) (_r *TExecuteStatementResp, _err error) { + var _args106 TCLIServiceExecuteStatementArgs + _args106.Req = req + var _result108 TCLIServiceExecuteStatementResult + var _meta107 thrift.ResponseMeta + _meta107, _err = p.Client_().Call(ctx, "ExecuteStatement", &_args106, &_result108) + p.SetLastResponseMeta_(_meta107) + if _err != nil { + return + } + if _ret109 := _result108.GetSuccess(); _ret109 != nil { + return _ret109, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "ExecuteStatement failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetTypeInfo(ctx context.Context, req *TGetTypeInfoReq) (_r *TGetTypeInfoResp, _err error) { + var _args110 TCLIServiceGetTypeInfoArgs + _args110.Req = req + var _result112 TCLIServiceGetTypeInfoResult + var _meta111 thrift.ResponseMeta + _meta111, _err = p.Client_().Call(ctx, "GetTypeInfo", &_args110, &_result112) + p.SetLastResponseMeta_(_meta111) + if _err != nil { + return + } + if _ret113 := _result112.GetSuccess(); _ret113 != nil { + return _ret113, nil + } 
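+ // Every generated client method follows the shape visible here: wrap the
+ // request in a generated *Args struct, make one synchronous
+ // thrift.TClient.Call with the method name, stash the response metadata
+ // for LastResponseMeta_(), then unwrap *Result.GetSuccess(). A reply
+ // without a success payload falls through to the MISSING_RESULT
+ // application exception just below.
+ //
+ // Minimal usage sketch (ctx, trans and pf are hypothetical stand-ins for
+ // a context.Context, an opened thrift.TTransport and a
+ // thrift.TProtocolFactory; none of them are defined in this file):
+ //
+ //   client := NewTCLIServiceClientFactory(trans, pf)
+ //   resp, err := client.GetTypeInfo(ctx, &TGetTypeInfoReq{})
+ //   if err != nil {
+ //       // transport failure or a TApplicationException like the one below
+ //   }
+ //   _ = resp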
+ return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetTypeInfo failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetCatalogs(ctx context.Context, req *TGetCatalogsReq) (_r *TGetCatalogsResp, _err error) { + var _args114 TCLIServiceGetCatalogsArgs + _args114.Req = req + var _result116 TCLIServiceGetCatalogsResult + var _meta115 thrift.ResponseMeta + _meta115, _err = p.Client_().Call(ctx, "GetCatalogs", &_args114, &_result116) + p.SetLastResponseMeta_(_meta115) + if _err != nil { + return + } + if _ret117 := _result116.GetSuccess(); _ret117 != nil { + return _ret117, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetCatalogs failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetSchemas(ctx context.Context, req *TGetSchemasReq) (_r *TGetSchemasResp, _err error) { + var _args118 TCLIServiceGetSchemasArgs + _args118.Req = req + var _result120 TCLIServiceGetSchemasResult + var _meta119 thrift.ResponseMeta + _meta119, _err = p.Client_().Call(ctx, "GetSchemas", &_args118, &_result120) + p.SetLastResponseMeta_(_meta119) + if _err != nil { + return + } + if _ret121 := _result120.GetSuccess(); _ret121 != nil { + return _ret121, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetSchemas failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetTables(ctx context.Context, req *TGetTablesReq) (_r *TGetTablesResp, _err error) { + var _args122 TCLIServiceGetTablesArgs + _args122.Req = req + var _result124 TCLIServiceGetTablesResult + var _meta123 thrift.ResponseMeta + _meta123, _err = p.Client_().Call(ctx, "GetTables", &_args122, &_result124) + p.SetLastResponseMeta_(_meta123) + if _err != nil { + return + } + if _ret125 := _result124.GetSuccess(); _ret125 != nil { + return _ret125, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetTables failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetTableTypes(ctx context.Context, req *TGetTableTypesReq) (_r *TGetTableTypesResp, _err error) { + var _args126 TCLIServiceGetTableTypesArgs + _args126.Req = req + var _result128 TCLIServiceGetTableTypesResult + var _meta127 thrift.ResponseMeta + _meta127, _err = p.Client_().Call(ctx, "GetTableTypes", &_args126, &_result128) + p.SetLastResponseMeta_(_meta127) + if _err != nil { + return + } + if _ret129 := _result128.GetSuccess(); _ret129 != nil { + return _ret129, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetTableTypes failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetColumns(ctx context.Context, req *TGetColumnsReq) (_r *TGetColumnsResp, _err error) { + var _args130 TCLIServiceGetColumnsArgs + _args130.Req = req + var _result132 TCLIServiceGetColumnsResult + var _meta131 thrift.ResponseMeta + _meta131, _err = p.Client_().Call(ctx, "GetColumns", &_args130, &_result132) + p.SetLastResponseMeta_(_meta131) + if _err != nil { + return + } + if _ret133 := _result132.GetSuccess(); _ret133 != nil { + return _ret133, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetColumns failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetFunctions(ctx context.Context, req *TGetFunctionsReq) (_r *TGetFunctionsResp, _err error) { + var _args134 TCLIServiceGetFunctionsArgs + _args134.Req = req + var _result136 TCLIServiceGetFunctionsResult + var _meta135 
thrift.ResponseMeta + _meta135, _err = p.Client_().Call(ctx, "GetFunctions", &_args134, &_result136) + p.SetLastResponseMeta_(_meta135) + if _err != nil { + return + } + if _ret137 := _result136.GetSuccess(); _ret137 != nil { + return _ret137, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetFunctions failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetPrimaryKeys(ctx context.Context, req *TGetPrimaryKeysReq) (_r *TGetPrimaryKeysResp, _err error) { + var _args138 TCLIServiceGetPrimaryKeysArgs + _args138.Req = req + var _result140 TCLIServiceGetPrimaryKeysResult + var _meta139 thrift.ResponseMeta + _meta139, _err = p.Client_().Call(ctx, "GetPrimaryKeys", &_args138, &_result140) + p.SetLastResponseMeta_(_meta139) + if _err != nil { + return + } + if _ret141 := _result140.GetSuccess(); _ret141 != nil { + return _ret141, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetPrimaryKeys failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetCrossReference(ctx context.Context, req *TGetCrossReferenceReq) (_r *TGetCrossReferenceResp, _err error) { + var _args142 TCLIServiceGetCrossReferenceArgs + _args142.Req = req + var _result144 TCLIServiceGetCrossReferenceResult + var _meta143 thrift.ResponseMeta + _meta143, _err = p.Client_().Call(ctx, "GetCrossReference", &_args142, &_result144) + p.SetLastResponseMeta_(_meta143) + if _err != nil { + return + } + if _ret145 := _result144.GetSuccess(); _ret145 != nil { + return _ret145, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetCrossReference failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetOperationStatus(ctx context.Context, req *TGetOperationStatusReq) (_r *TGetOperationStatusResp, _err error) { + var _args146 TCLIServiceGetOperationStatusArgs + _args146.Req = req + var _result148 TCLIServiceGetOperationStatusResult + var _meta147 thrift.ResponseMeta + _meta147, _err = p.Client_().Call(ctx, "GetOperationStatus", &_args146, &_result148) + p.SetLastResponseMeta_(_meta147) + if _err != nil { + return + } + if _ret149 := _result148.GetSuccess(); _ret149 != nil { + return _ret149, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetOperationStatus failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) CancelOperation(ctx context.Context, req *TCancelOperationReq) (_r *TCancelOperationResp, _err error) { + var _args150 TCLIServiceCancelOperationArgs + _args150.Req = req + var _result152 TCLIServiceCancelOperationResult + var _meta151 thrift.ResponseMeta + _meta151, _err = p.Client_().Call(ctx, "CancelOperation", &_args150, &_result152) + p.SetLastResponseMeta_(_meta151) + if _err != nil { + return + } + if _ret153 := _result152.GetSuccess(); _ret153 != nil { + return _ret153, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "CancelOperation failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) CloseOperation(ctx context.Context, req *TCloseOperationReq) (_r *TCloseOperationResp, _err error) { + var _args154 TCLIServiceCloseOperationArgs + _args154.Req = req + var _result156 TCLIServiceCloseOperationResult + var _meta155 thrift.ResponseMeta + _meta155, _err = p.Client_().Call(ctx, "CloseOperation", &_args154, &_result156) + p.SetLastResponseMeta_(_meta155) + if _err != nil { + return + } + if _ret157 := _result156.GetSuccess(); _ret157 != nil { 
+ return _ret157, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "CloseOperation failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetResultSetMetadata(ctx context.Context, req *TGetResultSetMetadataReq) (_r *TGetResultSetMetadataResp, _err error) { + var _args158 TCLIServiceGetResultSetMetadataArgs + _args158.Req = req + var _result160 TCLIServiceGetResultSetMetadataResult + var _meta159 thrift.ResponseMeta + _meta159, _err = p.Client_().Call(ctx, "GetResultSetMetadata", &_args158, &_result160) + p.SetLastResponseMeta_(_meta159) + if _err != nil { + return + } + if _ret161 := _result160.GetSuccess(); _ret161 != nil { + return _ret161, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetResultSetMetadata failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) FetchResults(ctx context.Context, req *TFetchResultsReq) (_r *TFetchResultsResp, _err error) { + var _args162 TCLIServiceFetchResultsArgs + _args162.Req = req + var _result164 TCLIServiceFetchResultsResult + var _meta163 thrift.ResponseMeta + _meta163, _err = p.Client_().Call(ctx, "FetchResults", &_args162, &_result164) + p.SetLastResponseMeta_(_meta163) + if _err != nil { + return + } + if _ret165 := _result164.GetSuccess(); _ret165 != nil { + return _ret165, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "FetchResults failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) GetDelegationToken(ctx context.Context, req *TGetDelegationTokenReq) (_r *TGetDelegationTokenResp, _err error) { + var _args166 TCLIServiceGetDelegationTokenArgs + _args166.Req = req + var _result168 TCLIServiceGetDelegationTokenResult + var _meta167 thrift.ResponseMeta + _meta167, _err = p.Client_().Call(ctx, "GetDelegationToken", &_args166, &_result168) + p.SetLastResponseMeta_(_meta167) + if _err != nil { + return + } + if _ret169 := _result168.GetSuccess(); _ret169 != nil { + return _ret169, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "GetDelegationToken failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) CancelDelegationToken(ctx context.Context, req *TCancelDelegationTokenReq) (_r *TCancelDelegationTokenResp, _err error) { + var _args170 TCLIServiceCancelDelegationTokenArgs + _args170.Req = req + var _result172 TCLIServiceCancelDelegationTokenResult + var _meta171 thrift.ResponseMeta + _meta171, _err = p.Client_().Call(ctx, "CancelDelegationToken", &_args170, &_result172) + p.SetLastResponseMeta_(_meta171) + if _err != nil { + return + } + if _ret173 := _result172.GetSuccess(); _ret173 != nil { + return _ret173, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "CancelDelegationToken failed: unknown result") +} + +// Parameters: +// - Req +func (p *TCLIServiceClient) RenewDelegationToken(ctx context.Context, req *TRenewDelegationTokenReq) (_r *TRenewDelegationTokenResp, _err error) { + var _args174 TCLIServiceRenewDelegationTokenArgs + _args174.Req = req + var _result176 TCLIServiceRenewDelegationTokenResult + var _meta175 thrift.ResponseMeta + _meta175, _err = p.Client_().Call(ctx, "RenewDelegationToken", &_args174, &_result176) + p.SetLastResponseMeta_(_meta175) + if _err != nil { + return + } + if _ret177 := _result176.GetSuccess(); _ret177 != nil { + return _ret177, nil + } + return nil, thrift.NewTApplicationException(thrift.MISSING_RESULT, "RenewDelegationToken failed: unknown 
result") +} + +type TCLIServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler TCLIService +} + +func (p *TCLIServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *TCLIServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *TCLIServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewTCLIServiceProcessor(handler TCLIService) *TCLIServiceProcessor { + + self178 := &TCLIServiceProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self178.processorMap["OpenSession"] = &tCLIServiceProcessorOpenSession{handler:handler} + self178.processorMap["CloseSession"] = &tCLIServiceProcessorCloseSession{handler:handler} + self178.processorMap["GetInfo"] = &tCLIServiceProcessorGetInfo{handler:handler} + self178.processorMap["ExecuteStatement"] = &tCLIServiceProcessorExecuteStatement{handler:handler} + self178.processorMap["GetTypeInfo"] = &tCLIServiceProcessorGetTypeInfo{handler:handler} + self178.processorMap["GetCatalogs"] = &tCLIServiceProcessorGetCatalogs{handler:handler} + self178.processorMap["GetSchemas"] = &tCLIServiceProcessorGetSchemas{handler:handler} + self178.processorMap["GetTables"] = &tCLIServiceProcessorGetTables{handler:handler} + self178.processorMap["GetTableTypes"] = &tCLIServiceProcessorGetTableTypes{handler:handler} + self178.processorMap["GetColumns"] = &tCLIServiceProcessorGetColumns{handler:handler} + self178.processorMap["GetFunctions"] = &tCLIServiceProcessorGetFunctions{handler:handler} + self178.processorMap["GetPrimaryKeys"] = &tCLIServiceProcessorGetPrimaryKeys{handler:handler} + self178.processorMap["GetCrossReference"] = &tCLIServiceProcessorGetCrossReference{handler:handler} + self178.processorMap["GetOperationStatus"] = &tCLIServiceProcessorGetOperationStatus{handler:handler} + self178.processorMap["CancelOperation"] = &tCLIServiceProcessorCancelOperation{handler:handler} + self178.processorMap["CloseOperation"] = &tCLIServiceProcessorCloseOperation{handler:handler} + self178.processorMap["GetResultSetMetadata"] = &tCLIServiceProcessorGetResultSetMetadata{handler:handler} + self178.processorMap["FetchResults"] = &tCLIServiceProcessorFetchResults{handler:handler} + self178.processorMap["GetDelegationToken"] = &tCLIServiceProcessorGetDelegationToken{handler:handler} + self178.processorMap["CancelDelegationToken"] = &tCLIServiceProcessorCancelDelegationToken{handler:handler} + self178.processorMap["RenewDelegationToken"] = &tCLIServiceProcessorRenewDelegationToken{handler:handler} +return self178 +} + +func (p *TCLIServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x179 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x179.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x179 + +} + +type tCLIServiceProcessorOpenSession struct { + handler TCLIService +} + +func (p 
*tCLIServiceProcessorOpenSession) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err180 error + args := TCLIServiceOpenSessionArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "OpenSession", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceOpenSessionResult{} + if retval, err2 := p.handler.OpenSession(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc181 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing OpenSession: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "OpenSession", thrift.EXCEPTION, seqId); err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := _exc181.Write(ctx, oprot); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if _write_err180 != nil { + return false, thrift.WrapTException(_write_err180) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "OpenSession", thrift.REPLY, seqId); err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err180 == nil && err2 != nil { + _write_err180 = thrift.WrapTException(err2) + } + if _write_err180 != nil { + return false, thrift.WrapTException(_write_err180) + } + return true, err +} + +type tCLIServiceProcessorCloseSession struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorCloseSession) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err182 error + args := TCLIServiceCloseSessionArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "CloseSession", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceCloseSessionResult{} + if retval, err2 := p.handler.CloseSession(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc183 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CloseSession: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "CloseSession", thrift.EXCEPTION, seqId); err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := _exc183.Write(ctx, oprot); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if _write_err182 != nil { + return false, thrift.WrapTException(_write_err182) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "CloseSession", thrift.REPLY, seqId); err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err182 == nil && err2 != nil { + _write_err182 = thrift.WrapTException(err2) + } + if _write_err182 != nil { + return false, thrift.WrapTException(_write_err182) + } + return true, err +} + +type tCLIServiceProcessorGetInfo struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err184 error + args := TCLIServiceGetInfoArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetInfo", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + 
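+ // The watchdog below is stamped into every generated Process method:
+ // while the handler runs, a goroutine polls the input transport once per
+ // thrift.ServerConnectivityCheckInterval and, as soon as the peer has
+ // hung up (IsOpen() reports false), cancels the request context with
+ // cause thrift.ErrAbandonRequest so the handler can stop computing a
+ // result nobody will read. tickerCancel stops the goroutine again the
+ // moment the handler returns.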
// Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetInfoResult{} + if retval, err2 := p.handler.GetInfo(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc185 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetInfo: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetInfo", thrift.EXCEPTION, seqId); err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := _exc185.Write(ctx, oprot); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if _write_err184 != nil { + return false, thrift.WrapTException(_write_err184) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetInfo", thrift.REPLY, seqId); err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err184 == nil && err2 != nil { + _write_err184 = thrift.WrapTException(err2) + } + if _write_err184 != nil { + return false, thrift.WrapTException(_write_err184) + } + return true, err +} + +type tCLIServiceProcessorExecuteStatement struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorExecuteStatement) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err186 error + args := TCLIServiceExecuteStatementArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "ExecuteStatement", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceExecuteStatementResult{} + if retval, err2 := p.handler.ExecuteStatement(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc187 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ExecuteStatement: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "ExecuteStatement", thrift.EXCEPTION, seqId); err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := _exc187.Write(ctx, oprot); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if _write_err186 != nil { + return false, thrift.WrapTException(_write_err186) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "ExecuteStatement", thrift.REPLY, seqId); err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err186 == nil && err2 != nil { + _write_err186 = thrift.WrapTException(err2) + } + if _write_err186 != nil { + return false, thrift.WrapTException(_write_err186) + } + return true, err +} + +type tCLIServiceProcessorGetTypeInfo struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetTypeInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err188 error + args := TCLIServiceGetTypeInfoArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetTypeInfo", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
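+ // (The whole check is skipped when thrift.ServerConnectivityCheckInterval
+ // is zero or negative.) If the handler fails further down, the generated
+ // error path separates an abandoned request from an ordinary failure: an
+ // error matching thrift.ErrAbandonRequest, directly or as the context
+ // cancellation cause, produces no reply at all, while anything else is
+ // reported back to the client as an INTERNAL_ERROR TApplicationException.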
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetTypeInfoResult{} + if retval, err2 := p.handler.GetTypeInfo(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc189 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTypeInfo: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetTypeInfo", thrift.EXCEPTION, seqId); err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := _exc189.Write(ctx, oprot); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if _write_err188 != nil { + return false, thrift.WrapTException(_write_err188) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetTypeInfo", thrift.REPLY, seqId); err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err188 == nil && err2 != nil { + _write_err188 = thrift.WrapTException(err2) + } + if _write_err188 != nil { + return false, thrift.WrapTException(_write_err188) + } + return true, err +} + +type tCLIServiceProcessorGetCatalogs struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetCatalogs) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err190 error + args := TCLIServiceGetCatalogsArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetCatalogs", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetCatalogsResult{} + if retval, err2 := p.handler.GetCatalogs(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc191 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetCatalogs: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetCatalogs", thrift.EXCEPTION, seqId); err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := _exc191.Write(ctx, oprot); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if _write_err190 != nil { + return false, thrift.WrapTException(_write_err190) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetCatalogs", thrift.REPLY, seqId); err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err190 == nil && err2 != nil { + _write_err190 = thrift.WrapTException(err2) + } + if _write_err190 != nil { + return false, thrift.WrapTException(_write_err190) + } + return true, err +} + +type tCLIServiceProcessorGetSchemas struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetSchemas) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err192 error + args := TCLIServiceGetSchemasArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetSchemas", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetSchemasResult{} + if retval, err2 := p.handler.GetSchemas(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc193 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetSchemas: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetSchemas", thrift.EXCEPTION, seqId); err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := _exc193.Write(ctx, oprot); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if _write_err192 != nil { + return false, thrift.WrapTException(_write_err192) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetSchemas", thrift.REPLY, seqId); err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err192 == nil && err2 != nil { + _write_err192 = thrift.WrapTException(err2) + } + if _write_err192 != nil { + return false, thrift.WrapTException(_write_err192) + } + return true, err +} + +type tCLIServiceProcessorGetTables struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetTables) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err194 error + args := TCLIServiceGetTablesArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetTables", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetTablesResult{} + if retval, err2 := p.handler.GetTables(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc195 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTables: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetTables", thrift.EXCEPTION, seqId); err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := _exc195.Write(ctx, oprot); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if _write_err194 != nil { + return false, thrift.WrapTException(_write_err194) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetTables", thrift.REPLY, seqId); err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err194 == nil && err2 != nil { + _write_err194 = thrift.WrapTException(err2) + } + if _write_err194 != nil { + return false, thrift.WrapTException(_write_err194) + } + return true, err +} + +type tCLIServiceProcessorGetTableTypes struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetTableTypes) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err196 error + args := TCLIServiceGetTableTypesArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetTableTypes", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
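+ // Note the _write_err bookkeeping on both reply paths below: all four
+ // protocol writes (message begin, payload, message end, flush) are
+ // attempted, only the first failure is recorded, and a failed write
+ // returns false so the connection is torn down. A handler error whose
+ // exception reply was written successfully still returns true, leaving
+ // the transport usable for the next request.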
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetTableTypesResult{} + if retval, err2 := p.handler.GetTableTypes(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc197 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTableTypes: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetTableTypes", thrift.EXCEPTION, seqId); err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := _exc197.Write(ctx, oprot); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if _write_err196 != nil { + return false, thrift.WrapTException(_write_err196) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetTableTypes", thrift.REPLY, seqId); err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err196 == nil && err2 != nil { + _write_err196 = thrift.WrapTException(err2) + } + if _write_err196 != nil { + return false, thrift.WrapTException(_write_err196) + } + return true, err +} + +type tCLIServiceProcessorGetColumns struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetColumns) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err198 error + args := TCLIServiceGetColumnsArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetColumns", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetColumnsResult{} + if retval, err2 := p.handler.GetColumns(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc199 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetColumns: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetColumns", thrift.EXCEPTION, seqId); err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := _exc199.Write(ctx, oprot); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if _write_err198 != nil { + return false, thrift.WrapTException(_write_err198) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetColumns", thrift.REPLY, seqId); err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err198 == nil && err2 != nil { + _write_err198 = thrift.WrapTException(err2) + } + if _write_err198 != nil { + return false, thrift.WrapTException(_write_err198) + } + return true, err +} + +type tCLIServiceProcessorGetFunctions struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetFunctions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err200 error + args := TCLIServiceGetFunctionsArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetFunctions", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetFunctionsResult{} + if retval, err2 := p.handler.GetFunctions(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc201 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetFunctions: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetFunctions", thrift.EXCEPTION, seqId); err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := _exc201.Write(ctx, oprot); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if _write_err200 != nil { + return false, thrift.WrapTException(_write_err200) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetFunctions", thrift.REPLY, seqId); err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err200 == nil && err2 != nil { + _write_err200 = thrift.WrapTException(err2) + } + if _write_err200 != nil { + return false, thrift.WrapTException(_write_err200) + } + return true, err +} + +type tCLIServiceProcessorGetPrimaryKeys struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetPrimaryKeys) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err202 error + args := TCLIServiceGetPrimaryKeysArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetPrimaryKeys", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
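+ // Whichever way it replies, the server echoes the request's seqId so the
+ // client can match the response to its pending call; only the message
+ // type (thrift.REPLY vs thrift.EXCEPTION) distinguishes success from
+ // failure.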
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetPrimaryKeysResult{} + if retval, err2 := p.handler.GetPrimaryKeys(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc203 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetPrimaryKeys: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetPrimaryKeys", thrift.EXCEPTION, seqId); err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := _exc203.Write(ctx, oprot); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if _write_err202 != nil { + return false, thrift.WrapTException(_write_err202) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetPrimaryKeys", thrift.REPLY, seqId); err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err202 == nil && err2 != nil { + _write_err202 = thrift.WrapTException(err2) + } + if _write_err202 != nil { + return false, thrift.WrapTException(_write_err202) + } + return true, err +} + +type tCLIServiceProcessorGetCrossReference struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetCrossReference) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err204 error + args := TCLIServiceGetCrossReferenceArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetCrossReference", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetCrossReferenceResult{} + if retval, err2 := p.handler.GetCrossReference(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc205 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetCrossReference: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetCrossReference", thrift.EXCEPTION, seqId); err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := _exc205.Write(ctx, oprot); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if _write_err204 != nil { + return false, thrift.WrapTException(_write_err204) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetCrossReference", thrift.REPLY, seqId); err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err204 == nil && err2 != nil { + _write_err204 = thrift.WrapTException(err2) + } + if _write_err204 != nil { + return false, thrift.WrapTException(_write_err204) + } + return true, err +} + +type tCLIServiceProcessorGetOperationStatus struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetOperationStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err206 error + args := TCLIServiceGetOperationStatusArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetOperationStatus", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetOperationStatusResult{} + if retval, err2 := p.handler.GetOperationStatus(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc207 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetOperationStatus: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetOperationStatus", thrift.EXCEPTION, seqId); err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := _exc207.Write(ctx, oprot); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if _write_err206 != nil { + return false, thrift.WrapTException(_write_err206) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetOperationStatus", thrift.REPLY, seqId); err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err206 == nil && err2 != nil { + _write_err206 = thrift.WrapTException(err2) + } + if _write_err206 != nil { + return false, thrift.WrapTException(_write_err206) + } + return true, err +} + +type tCLIServiceProcessorCancelOperation struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorCancelOperation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err208 error + args := TCLIServiceCancelOperationArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "CancelOperation", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
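+	// Handler errors are classified below: an abandoned request returns without
+	// writing a reply (the client is gone), a context cancellation whose cause is
+	// the abandon signal is treated the same way, and any other error is reported
+	// back to the client as an INTERNAL_ERROR TApplicationException.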
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceCancelOperationResult{} + if retval, err2 := p.handler.CancelOperation(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc209 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CancelOperation: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "CancelOperation", thrift.EXCEPTION, seqId); err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := _exc209.Write(ctx, oprot); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if _write_err208 != nil { + return false, thrift.WrapTException(_write_err208) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "CancelOperation", thrift.REPLY, seqId); err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err208 == nil && err2 != nil { + _write_err208 = thrift.WrapTException(err2) + } + if _write_err208 != nil { + return false, thrift.WrapTException(_write_err208) + } + return true, err +} + +type tCLIServiceProcessorCloseOperation struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorCloseOperation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err210 error + args := TCLIServiceCloseOperationArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "CloseOperation", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceCloseOperationResult{} + if retval, err2 := p.handler.CloseOperation(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc211 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CloseOperation: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "CloseOperation", thrift.EXCEPTION, seqId); err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := _exc211.Write(ctx, oprot); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if _write_err210 != nil { + return false, thrift.WrapTException(_write_err210) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "CloseOperation", thrift.REPLY, seqId); err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err210 == nil && err2 != nil { + _write_err210 = thrift.WrapTException(err2) + } + if _write_err210 != nil { + return false, thrift.WrapTException(_write_err210) + } + return true, err +} + +type tCLIServiceProcessorGetResultSetMetadata struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetResultSetMetadata) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err212 error + args := TCLIServiceGetResultSetMetadataArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetResultSetMetadata", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
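+	// On the success path the result struct is serialized back as a REPLY message;
+	// the first failure among WriteMessageBegin, Write, WriteMessageEnd, and Flush
+	// is captured in the local _write_err variable and returned as the processor error.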
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetResultSetMetadataResult{} + if retval, err2 := p.handler.GetResultSetMetadata(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc213 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetResultSetMetadata: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetResultSetMetadata", thrift.EXCEPTION, seqId); err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := _exc213.Write(ctx, oprot); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if _write_err212 != nil { + return false, thrift.WrapTException(_write_err212) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetResultSetMetadata", thrift.REPLY, seqId); err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err212 == nil && err2 != nil { + _write_err212 = thrift.WrapTException(err2) + } + if _write_err212 != nil { + return false, thrift.WrapTException(_write_err212) + } + return true, err +} + +type tCLIServiceProcessorFetchResults struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorFetchResults) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err214 error + args := TCLIServiceFetchResultsArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "FetchResults", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceFetchResultsResult{} + if retval, err2 := p.handler.FetchResults(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc215 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing FetchResults: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "FetchResults", thrift.EXCEPTION, seqId); err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := _exc215.Write(ctx, oprot); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if _write_err214 != nil { + return false, thrift.WrapTException(_write_err214) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "FetchResults", thrift.REPLY, seqId); err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err214 == nil && err2 != nil { + _write_err214 = thrift.WrapTException(err2) + } + if _write_err214 != nil { + return false, thrift.WrapTException(_write_err214) + } + return true, err +} + +type tCLIServiceProcessorGetDelegationToken struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorGetDelegationToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err216 error + args := TCLIServiceGetDelegationTokenArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "GetDelegationToken", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceGetDelegationTokenResult{} + if retval, err2 := p.handler.GetDelegationToken(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc217 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetDelegationToken: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "GetDelegationToken", thrift.EXCEPTION, seqId); err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := _exc217.Write(ctx, oprot); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if _write_err216 != nil { + return false, thrift.WrapTException(_write_err216) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "GetDelegationToken", thrift.REPLY, seqId); err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err216 == nil && err2 != nil { + _write_err216 = thrift.WrapTException(err2) + } + if _write_err216 != nil { + return false, thrift.WrapTException(_write_err216) + } + return true, err +} + +type tCLIServiceProcessorCancelDelegationToken struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorCancelDelegationToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err218 error + args := TCLIServiceCancelDelegationTokenArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "CancelDelegationToken", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceCancelDelegationTokenResult{} + if retval, err2 := p.handler.CancelDelegationToken(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc219 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CancelDelegationToken: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "CancelDelegationToken", thrift.EXCEPTION, seqId); err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := _exc219.Write(ctx, oprot); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if _write_err218 != nil { + return false, thrift.WrapTException(_write_err218) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "CancelDelegationToken", thrift.REPLY, seqId); err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err218 == nil && err2 != nil { + _write_err218 = thrift.WrapTException(err2) + } + if _write_err218 != nil { + return false, thrift.WrapTException(_write_err218) + } + return true, err +} + +type tCLIServiceProcessorRenewDelegationToken struct { + handler TCLIService +} + +func (p *tCLIServiceProcessorRenewDelegationToken) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + var _write_err220 error + args := TCLIServiceRenewDelegationTokenArgs{} + if err2 := args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "RenewDelegationToken", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + defer cancel(nil) + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelCauseFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel(thrift.ErrAbandonRequest) + return + } + } + } + }(tickerCtx, cancel) + } + + result := TCLIServiceRenewDelegationTokenResult{} + if retval, err2 := p.handler.RenewDelegationToken(ctx, args.Req); err2 != nil { + tickerCancel() + err = thrift.WrapTException(err2) + if errors.Is(err2, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err2) + } + if errors.Is(err2, context.Canceled) { + if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { + return false, thrift.WrapTException(err) + } + } + _exc221 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing RenewDelegationToken: " + err2.Error()) + if err2 := oprot.WriteMessageBegin(ctx, "RenewDelegationToken", thrift.EXCEPTION, seqId); err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := _exc221.Write(ctx, oprot); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if _write_err220 != nil { + return false, thrift.WrapTException(_write_err220) + } + return true, err + } else { + result.Success = retval + } + tickerCancel() + if err2 := oprot.WriteMessageBegin(ctx, "RenewDelegationToken", thrift.REPLY, seqId); err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := result.Write(ctx, oprot); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := oprot.WriteMessageEnd(ctx); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if err2 := oprot.Flush(ctx); _write_err220 == nil && err2 != nil { + _write_err220 = thrift.WrapTException(err2) + } + if _write_err220 != nil { + return false, thrift.WrapTException(_write_err220) + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Req +type TCLIServiceOpenSessionArgs struct { + Req *TOpenSessionReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceOpenSessionArgs() *TCLIServiceOpenSessionArgs { + return &TCLIServiceOpenSessionArgs{} +} + +var TCLIServiceOpenSessionArgs_Req_DEFAULT *TOpenSessionReq +func (p *TCLIServiceOpenSessionArgs) GetReq() *TOpenSessionReq { + if !p.IsSetReq() { + return TCLIServiceOpenSessionArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceOpenSessionArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceOpenSessionArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == 
thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceOpenSessionArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TOpenSessionReq{ + ClientProtocol: -7, +} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceOpenSessionArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "OpenSession_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceOpenSessionArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceOpenSessionArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceOpenSessionArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceOpenSessionResult struct { + Success *TOpenSessionResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceOpenSessionResult() *TCLIServiceOpenSessionResult { + return &TCLIServiceOpenSessionResult{} +} + +var TCLIServiceOpenSessionResult_Success_DEFAULT *TOpenSessionResp +func (p *TCLIServiceOpenSessionResult) GetSuccess() *TOpenSessionResp { + if !p.IsSetSuccess() { + return TCLIServiceOpenSessionResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceOpenSessionResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceOpenSessionResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } 
+ } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceOpenSessionResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TOpenSessionResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceOpenSessionResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "OpenSession_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceOpenSessionResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceOpenSessionResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceOpenSessionResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceCloseSessionArgs struct { + Req *TCloseSessionReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceCloseSessionArgs() *TCLIServiceCloseSessionArgs { + return &TCLIServiceCloseSessionArgs{} +} + +var TCLIServiceCloseSessionArgs_Req_DEFAULT *TCloseSessionReq +func (p *TCLIServiceCloseSessionArgs) GetReq() *TCloseSessionReq { + if !p.IsSetReq() { + return TCLIServiceCloseSessionArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceCloseSessionArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceCloseSessionArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCloseSessionArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = 
&TCloseSessionReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceCloseSessionArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CloseSession_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCloseSessionArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceCloseSessionArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCloseSessionArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceCloseSessionResult struct { + Success *TCloseSessionResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceCloseSessionResult() *TCLIServiceCloseSessionResult { + return &TCLIServiceCloseSessionResult{} +} + +var TCLIServiceCloseSessionResult_Success_DEFAULT *TCloseSessionResp +func (p *TCLIServiceCloseSessionResult) GetSuccess() *TCloseSessionResp { + if !p.IsSetSuccess() { + return TCLIServiceCloseSessionResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceCloseSessionResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceCloseSessionResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCloseSessionResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TCloseSessionResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceCloseSessionResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, 
"CloseSession_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCloseSessionResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceCloseSessionResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCloseSessionResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetInfoArgs struct { + Req *TGetInfoReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetInfoArgs() *TCLIServiceGetInfoArgs { + return &TCLIServiceGetInfoArgs{} +} + +var TCLIServiceGetInfoArgs_Req_DEFAULT *TGetInfoReq +func (p *TCLIServiceGetInfoArgs) GetReq() *TGetInfoReq { + if !p.IsSetReq() { + return TCLIServiceGetInfoArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetInfoArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetInfoArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetInfoReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetInfoArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetInfo_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", 
err) } + return nil +} + +func (p *TCLIServiceGetInfoArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetInfoResult struct { + Success *TGetInfoResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetInfoResult() *TCLIServiceGetInfoResult { + return &TCLIServiceGetInfoResult{} +} + +var TCLIServiceGetInfoResult_Success_DEFAULT *TGetInfoResp +func (p *TCLIServiceGetInfoResult) GetSuccess() *TGetInfoResp { + if !p.IsSetSuccess() { + return TCLIServiceGetInfoResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetInfoResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetInfoResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetInfoResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetInfoResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetInfo_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetInfoResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceExecuteStatementArgs struct { + Req *TExecuteStatementReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceExecuteStatementArgs() *TCLIServiceExecuteStatementArgs { + return &TCLIServiceExecuteStatementArgs{} +} + +var TCLIServiceExecuteStatementArgs_Req_DEFAULT *TExecuteStatementReq +func (p *TCLIServiceExecuteStatementArgs) GetReq() *TExecuteStatementReq { + if !p.IsSetReq() { + return TCLIServiceExecuteStatementArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceExecuteStatementArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceExecuteStatementArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceExecuteStatementArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TExecuteStatementReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceExecuteStatementArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ExecuteStatement_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceExecuteStatementArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceExecuteStatementArgs) String() string { + if p == nil { + return "" + } + 
return fmt.Sprintf("TCLIServiceExecuteStatementArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceExecuteStatementResult struct { + Success *TExecuteStatementResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceExecuteStatementResult() *TCLIServiceExecuteStatementResult { + return &TCLIServiceExecuteStatementResult{} +} + +var TCLIServiceExecuteStatementResult_Success_DEFAULT *TExecuteStatementResp +func (p *TCLIServiceExecuteStatementResult) GetSuccess() *TExecuteStatementResp { + if !p.IsSetSuccess() { + return TCLIServiceExecuteStatementResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceExecuteStatementResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceExecuteStatementResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceExecuteStatementResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TExecuteStatementResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceExecuteStatementResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ExecuteStatement_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceExecuteStatementResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceExecuteStatementResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceExecuteStatementResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetTypeInfoArgs struct { + Req *TGetTypeInfoReq `thrift:"req,1" 
db:"req" json:"req"` +} + +func NewTCLIServiceGetTypeInfoArgs() *TCLIServiceGetTypeInfoArgs { + return &TCLIServiceGetTypeInfoArgs{} +} + +var TCLIServiceGetTypeInfoArgs_Req_DEFAULT *TGetTypeInfoReq +func (p *TCLIServiceGetTypeInfoArgs) GetReq() *TGetTypeInfoReq { + if !p.IsSetReq() { + return TCLIServiceGetTypeInfoArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetTypeInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetTypeInfoArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTypeInfoArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetTypeInfoReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetTypeInfoArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTypeInfo_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTypeInfoArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetTypeInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTypeInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetTypeInfoResult struct { + Success *TGetTypeInfoResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetTypeInfoResult() *TCLIServiceGetTypeInfoResult { + return &TCLIServiceGetTypeInfoResult{} +} + +var TCLIServiceGetTypeInfoResult_Success_DEFAULT *TGetTypeInfoResp +func (p *TCLIServiceGetTypeInfoResult) GetSuccess() *TGetTypeInfoResp { + if !p.IsSetSuccess() { + return TCLIServiceGetTypeInfoResult_Success_DEFAULT + } +return p.Success +} +func 
(p *TCLIServiceGetTypeInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetTypeInfoResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTypeInfoResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetTypeInfoResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetTypeInfoResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTypeInfo_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTypeInfoResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetTypeInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTypeInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetCatalogsArgs struct { + Req *TGetCatalogsReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetCatalogsArgs() *TCLIServiceGetCatalogsArgs { + return &TCLIServiceGetCatalogsArgs{} +} + +var TCLIServiceGetCatalogsArgs_Req_DEFAULT *TGetCatalogsReq +func (p *TCLIServiceGetCatalogsArgs) GetReq() *TGetCatalogsReq { + if !p.IsSetReq() { + return TCLIServiceGetCatalogsArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetCatalogsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetCatalogsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, 
err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetCatalogsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetCatalogsReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetCatalogsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetCatalogs_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetCatalogsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetCatalogsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetCatalogsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetCatalogsResult struct { + Success *TGetCatalogsResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetCatalogsResult() *TCLIServiceGetCatalogsResult { + return &TCLIServiceGetCatalogsResult{} +} + +var TCLIServiceGetCatalogsResult_Success_DEFAULT *TGetCatalogsResp +func (p *TCLIServiceGetCatalogsResult) GetSuccess() *TGetCatalogsResp { + if !p.IsSetSuccess() { + return TCLIServiceGetCatalogsResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetCatalogsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetCatalogsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetCatalogsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetCatalogsResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetCatalogsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetCatalogs_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetCatalogsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetCatalogsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetCatalogsResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetSchemasArgs struct { + Req *TGetSchemasReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetSchemasArgs() *TCLIServiceGetSchemasArgs { + return &TCLIServiceGetSchemasArgs{} +} + +var TCLIServiceGetSchemasArgs_Req_DEFAULT *TGetSchemasReq +func (p *TCLIServiceGetSchemasArgs) GetReq() *TGetSchemasReq { + if !p.IsSetReq() { + return TCLIServiceGetSchemasArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetSchemasArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetSchemasArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return 
nil +} + +func (p *TCLIServiceGetSchemasArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetSchemasReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetSchemasArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetSchemas_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetSchemasArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetSchemasArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetSchemasArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetSchemasResult struct { + Success *TGetSchemasResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetSchemasResult() *TCLIServiceGetSchemasResult { + return &TCLIServiceGetSchemasResult{} +} + +var TCLIServiceGetSchemasResult_Success_DEFAULT *TGetSchemasResp +func (p *TCLIServiceGetSchemasResult) GetSuccess() *TGetSchemasResp { + if !p.IsSetSuccess() { + return TCLIServiceGetSchemasResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetSchemasResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetSchemasResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetSchemasResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetSchemasResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetSchemasResult) Write(ctx 
context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetSchemas_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetSchemasResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetSchemasResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetSchemasResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetTablesArgs struct { + Req *TGetTablesReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetTablesArgs() *TCLIServiceGetTablesArgs { + return &TCLIServiceGetTablesArgs{} +} + +var TCLIServiceGetTablesArgs_Req_DEFAULT *TGetTablesReq +func (p *TCLIServiceGetTablesArgs) GetReq() *TGetTablesReq { + if !p.IsSetReq() { + return TCLIServiceGetTablesArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetTablesArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetTablesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTablesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetTablesReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetTablesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTables_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } 
+ if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTablesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetTablesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTablesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetTablesResult struct { + Success *TGetTablesResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetTablesResult() *TCLIServiceGetTablesResult { + return &TCLIServiceGetTablesResult{} +} + +var TCLIServiceGetTablesResult_Success_DEFAULT *TGetTablesResp +func (p *TCLIServiceGetTablesResult) GetSuccess() *TGetTablesResp { + if !p.IsSetSuccess() { + return TCLIServiceGetTablesResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetTablesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetTablesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTablesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetTablesResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetTablesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTables_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTablesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetTablesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTablesResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetTableTypesArgs struct { + Req *TGetTableTypesReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetTableTypesArgs() *TCLIServiceGetTableTypesArgs { + return &TCLIServiceGetTableTypesArgs{} +} + +var TCLIServiceGetTableTypesArgs_Req_DEFAULT *TGetTableTypesReq +func (p *TCLIServiceGetTableTypesArgs) GetReq() *TGetTableTypesReq { + if !p.IsSetReq() { + return TCLIServiceGetTableTypesArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetTableTypesArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetTableTypesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTableTypesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetTableTypesReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetTableTypesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTableTypes_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTableTypesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + 
+func (p *TCLIServiceGetTableTypesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTableTypesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetTableTypesResult struct { + Success *TGetTableTypesResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetTableTypesResult() *TCLIServiceGetTableTypesResult { + return &TCLIServiceGetTableTypesResult{} +} + +var TCLIServiceGetTableTypesResult_Success_DEFAULT *TGetTableTypesResp +func (p *TCLIServiceGetTableTypesResult) GetSuccess() *TGetTableTypesResp { + if !p.IsSetSuccess() { + return TCLIServiceGetTableTypesResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetTableTypesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetTableTypesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetTableTypesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetTableTypesResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetTableTypesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetTableTypes_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetTableTypesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetTableTypesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetTableTypesResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetColumnsArgs struct { + Req 
*TGetColumnsReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetColumnsArgs() *TCLIServiceGetColumnsArgs { + return &TCLIServiceGetColumnsArgs{} +} + +var TCLIServiceGetColumnsArgs_Req_DEFAULT *TGetColumnsReq +func (p *TCLIServiceGetColumnsArgs) GetReq() *TGetColumnsReq { + if !p.IsSetReq() { + return TCLIServiceGetColumnsArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetColumnsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetColumnsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetColumnsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetColumnsReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetColumnsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetColumns_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetColumnsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetColumnsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetColumnsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetColumnsResult struct { + Success *TGetColumnsResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetColumnsResult() *TCLIServiceGetColumnsResult { + return &TCLIServiceGetColumnsResult{} +} + +var TCLIServiceGetColumnsResult_Success_DEFAULT *TGetColumnsResp +func (p *TCLIServiceGetColumnsResult) GetSuccess() *TGetColumnsResp { + if !p.IsSetSuccess() { + return TCLIServiceGetColumnsResult_Success_DEFAULT + } +return p.Success +} 
+func (p *TCLIServiceGetColumnsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetColumnsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetColumnsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetColumnsResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetColumnsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetColumns_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetColumnsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetColumnsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetColumnsResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetFunctionsArgs struct { + Req *TGetFunctionsReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetFunctionsArgs() *TCLIServiceGetFunctionsArgs { + return &TCLIServiceGetFunctionsArgs{} +} + +var TCLIServiceGetFunctionsArgs_Req_DEFAULT *TGetFunctionsReq +func (p *TCLIServiceGetFunctionsArgs) GetReq() *TGetFunctionsReq { + if !p.IsSetReq() { + return TCLIServiceGetFunctionsArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetFunctionsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetFunctionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, 
fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetFunctionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetFunctionsReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetFunctionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetFunctions_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetFunctionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetFunctionsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetFunctionsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetFunctionsResult struct { + Success *TGetFunctionsResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetFunctionsResult() *TCLIServiceGetFunctionsResult { + return &TCLIServiceGetFunctionsResult{} +} + +var TCLIServiceGetFunctionsResult_Success_DEFAULT *TGetFunctionsResp +func (p *TCLIServiceGetFunctionsResult) GetSuccess() *TGetFunctionsResp { + if !p.IsSetSuccess() { + return TCLIServiceGetFunctionsResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetFunctionsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetFunctionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } 
else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetFunctionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetFunctionsResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetFunctionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetFunctions_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetFunctionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetFunctionsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetFunctionsResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetPrimaryKeysArgs struct { + Req *TGetPrimaryKeysReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetPrimaryKeysArgs() *TCLIServiceGetPrimaryKeysArgs { + return &TCLIServiceGetPrimaryKeysArgs{} +} + +var TCLIServiceGetPrimaryKeysArgs_Req_DEFAULT *TGetPrimaryKeysReq +func (p *TCLIServiceGetPrimaryKeysArgs) GetReq() *TGetPrimaryKeysReq { + if !p.IsSetReq() { + return TCLIServiceGetPrimaryKeysArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetPrimaryKeysArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetPrimaryKeysArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetPrimaryKeysReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetPrimaryKeys_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetPrimaryKeysArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetPrimaryKeysArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetPrimaryKeysResult struct { + Success *TGetPrimaryKeysResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetPrimaryKeysResult() *TCLIServiceGetPrimaryKeysResult { + return &TCLIServiceGetPrimaryKeysResult{} +} + +var TCLIServiceGetPrimaryKeysResult_Success_DEFAULT *TGetPrimaryKeysResp +func (p *TCLIServiceGetPrimaryKeysResult) GetSuccess() *TGetPrimaryKeysResp { + if !p.IsSetSuccess() { + return TCLIServiceGetPrimaryKeysResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetPrimaryKeysResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetPrimaryKeysResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetPrimaryKeysResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetPrimaryKeys_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetPrimaryKeysResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetPrimaryKeysResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetPrimaryKeysResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetCrossReferenceArgs struct { + Req *TGetCrossReferenceReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetCrossReferenceArgs() *TCLIServiceGetCrossReferenceArgs { + return &TCLIServiceGetCrossReferenceArgs{} +} + +var TCLIServiceGetCrossReferenceArgs_Req_DEFAULT *TGetCrossReferenceReq +func (p *TCLIServiceGetCrossReferenceArgs) GetReq() *TGetCrossReferenceReq { + if !p.IsSetReq() { + return TCLIServiceGetCrossReferenceArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetCrossReferenceArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetCrossReferenceArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetCrossReferenceArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetCrossReferenceReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetCrossReferenceArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, 
"GetCrossReference_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetCrossReferenceArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetCrossReferenceArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetCrossReferenceArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetCrossReferenceResult struct { + Success *TGetCrossReferenceResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetCrossReferenceResult() *TCLIServiceGetCrossReferenceResult { + return &TCLIServiceGetCrossReferenceResult{} +} + +var TCLIServiceGetCrossReferenceResult_Success_DEFAULT *TGetCrossReferenceResp +func (p *TCLIServiceGetCrossReferenceResult) GetSuccess() *TGetCrossReferenceResp { + if !p.IsSetSuccess() { + return TCLIServiceGetCrossReferenceResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetCrossReferenceResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetCrossReferenceResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetCrossReferenceResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetCrossReferenceResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetCrossReferenceResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetCrossReference_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if 
err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetCrossReferenceResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetCrossReferenceResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetCrossReferenceResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetOperationStatusArgs struct { + Req *TGetOperationStatusReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetOperationStatusArgs() *TCLIServiceGetOperationStatusArgs { + return &TCLIServiceGetOperationStatusArgs{} +} + +var TCLIServiceGetOperationStatusArgs_Req_DEFAULT *TGetOperationStatusReq +func (p *TCLIServiceGetOperationStatusArgs) GetReq() *TGetOperationStatusReq { + if !p.IsSetReq() { + return TCLIServiceGetOperationStatusArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetOperationStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetOperationStatusArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetOperationStatusArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetOperationStatusReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetOperationStatusArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetOperationStatus_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} 
+ +func (p *TCLIServiceGetOperationStatusArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetOperationStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetOperationStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetOperationStatusResult struct { + Success *TGetOperationStatusResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetOperationStatusResult() *TCLIServiceGetOperationStatusResult { + return &TCLIServiceGetOperationStatusResult{} +} + +var TCLIServiceGetOperationStatusResult_Success_DEFAULT *TGetOperationStatusResp +func (p *TCLIServiceGetOperationStatusResult) GetSuccess() *TGetOperationStatusResp { + if !p.IsSetSuccess() { + return TCLIServiceGetOperationStatusResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetOperationStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetOperationStatusResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetOperationStatusResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetOperationStatusResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetOperationStatusResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetOperationStatus_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetOperationStatusResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", 
thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetOperationStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetOperationStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceCancelOperationArgs struct { + Req *TCancelOperationReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceCancelOperationArgs() *TCLIServiceCancelOperationArgs { + return &TCLIServiceCancelOperationArgs{} +} + +var TCLIServiceCancelOperationArgs_Req_DEFAULT *TCancelOperationReq +func (p *TCLIServiceCancelOperationArgs) GetReq() *TCancelOperationReq { + if !p.IsSetReq() { + return TCLIServiceCancelOperationArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceCancelOperationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceCancelOperationArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCancelOperationArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TCancelOperationReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceCancelOperationArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CancelOperation_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCancelOperationArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceCancelOperationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCancelOperationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceCancelOperationResult struct { + Success *TCancelOperationResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceCancelOperationResult() *TCLIServiceCancelOperationResult { + return &TCLIServiceCancelOperationResult{} +} + +var TCLIServiceCancelOperationResult_Success_DEFAULT *TCancelOperationResp +func (p *TCLIServiceCancelOperationResult) GetSuccess() *TCancelOperationResp { + if !p.IsSetSuccess() { + return TCLIServiceCancelOperationResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceCancelOperationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceCancelOperationResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCancelOperationResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TCancelOperationResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceCancelOperationResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CancelOperation_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCancelOperationResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceCancelOperationResult) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("TCLIServiceCancelOperationResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceCloseOperationArgs struct { + Req *TCloseOperationReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceCloseOperationArgs() *TCLIServiceCloseOperationArgs { + return &TCLIServiceCloseOperationArgs{} +} + +var TCLIServiceCloseOperationArgs_Req_DEFAULT *TCloseOperationReq +func (p *TCLIServiceCloseOperationArgs) GetReq() *TCloseOperationReq { + if !p.IsSetReq() { + return TCLIServiceCloseOperationArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceCloseOperationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceCloseOperationArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCloseOperationArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TCloseOperationReq{ + CloseReason: 0, +} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceCloseOperationArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CloseOperation_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCloseOperationArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceCloseOperationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCloseOperationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceCloseOperationResult struct { + Success *TCloseOperationResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceCloseOperationResult() *TCLIServiceCloseOperationResult { + return 
&TCLIServiceCloseOperationResult{} +} + +var TCLIServiceCloseOperationResult_Success_DEFAULT *TCloseOperationResp +func (p *TCLIServiceCloseOperationResult) GetSuccess() *TCloseOperationResp { + if !p.IsSetSuccess() { + return TCLIServiceCloseOperationResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceCloseOperationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceCloseOperationResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCloseOperationResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TCloseOperationResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceCloseOperationResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CloseOperation_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCloseOperationResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceCloseOperationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCloseOperationResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetResultSetMetadataArgs struct { + Req *TGetResultSetMetadataReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetResultSetMetadataArgs() *TCLIServiceGetResultSetMetadataArgs { + return &TCLIServiceGetResultSetMetadataArgs{} +} + +var TCLIServiceGetResultSetMetadataArgs_Req_DEFAULT *TGetResultSetMetadataReq +func (p *TCLIServiceGetResultSetMetadataArgs) GetReq() *TGetResultSetMetadataReq { + if !p.IsSetReq() { + 
return TCLIServiceGetResultSetMetadataArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetResultSetMetadataArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetResultSetMetadataArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetResultSetMetadataReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetResultSetMetadata_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetResultSetMetadataArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetResultSetMetadataArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetResultSetMetadataResult struct { + Success *TGetResultSetMetadataResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetResultSetMetadataResult() *TCLIServiceGetResultSetMetadataResult { + return &TCLIServiceGetResultSetMetadataResult{} +} + +var TCLIServiceGetResultSetMetadataResult_Success_DEFAULT *TGetResultSetMetadataResp +func (p *TCLIServiceGetResultSetMetadataResult) GetSuccess() *TGetResultSetMetadataResp { + if !p.IsSetSuccess() { + return TCLIServiceGetResultSetMetadataResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetResultSetMetadataResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p 
*TCLIServiceGetResultSetMetadataResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetResultSetMetadataResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetResultSetMetadata_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetResultSetMetadataResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetResultSetMetadataResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetResultSetMetadataResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceFetchResultsArgs struct { + Req *TFetchResultsReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceFetchResultsArgs() *TCLIServiceFetchResultsArgs { + return &TCLIServiceFetchResultsArgs{} +} + +var TCLIServiceFetchResultsArgs_Req_DEFAULT *TFetchResultsReq +func (p *TCLIServiceFetchResultsArgs) GetReq() *TFetchResultsReq { + if !p.IsSetReq() { + return TCLIServiceFetchResultsArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceFetchResultsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceFetchResultsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := 
iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceFetchResultsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TFetchResultsReq{ + Orientation: 0, +} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceFetchResultsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "FetchResults_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceFetchResultsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceFetchResultsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceFetchResultsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceFetchResultsResult struct { + Success *TFetchResultsResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceFetchResultsResult() *TCLIServiceFetchResultsResult { + return &TCLIServiceFetchResultsResult{} +} + +var TCLIServiceFetchResultsResult_Success_DEFAULT *TFetchResultsResp +func (p *TCLIServiceFetchResultsResult) GetSuccess() *TFetchResultsResp { + if !p.IsSetSuccess() { + return TCLIServiceFetchResultsResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceFetchResultsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceFetchResultsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } 
+ } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceFetchResultsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TFetchResultsResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceFetchResultsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "FetchResults_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceFetchResultsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceFetchResultsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceFetchResultsResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceGetDelegationTokenArgs struct { + Req *TGetDelegationTokenReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceGetDelegationTokenArgs() *TCLIServiceGetDelegationTokenArgs { + return &TCLIServiceGetDelegationTokenArgs{} +} + +var TCLIServiceGetDelegationTokenArgs_Req_DEFAULT *TGetDelegationTokenReq +func (p *TCLIServiceGetDelegationTokenArgs) GetReq() *TGetDelegationTokenReq { + if !p.IsSetReq() { + return TCLIServiceGetDelegationTokenArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceGetDelegationTokenArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceGetDelegationTokenArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := 
iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetDelegationTokenArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TGetDelegationTokenReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceGetDelegationTokenArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetDelegationToken_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetDelegationTokenArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceGetDelegationTokenArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetDelegationTokenArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceGetDelegationTokenResult struct { + Success *TGetDelegationTokenResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceGetDelegationTokenResult() *TCLIServiceGetDelegationTokenResult { + return &TCLIServiceGetDelegationTokenResult{} +} + +var TCLIServiceGetDelegationTokenResult_Success_DEFAULT *TGetDelegationTokenResp +func (p *TCLIServiceGetDelegationTokenResult) GetSuccess() *TGetDelegationTokenResp { + if !p.IsSetSuccess() { + return TCLIServiceGetDelegationTokenResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceGetDelegationTokenResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceGetDelegationTokenResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceGetDelegationTokenResult) ReadField0(ctx 
context.Context, iprot thrift.TProtocol) error { + p.Success = &TGetDelegationTokenResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceGetDelegationTokenResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "GetDelegationToken_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceGetDelegationTokenResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceGetDelegationTokenResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceGetDelegationTokenResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceCancelDelegationTokenArgs struct { + Req *TCancelDelegationTokenReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceCancelDelegationTokenArgs() *TCLIServiceCancelDelegationTokenArgs { + return &TCLIServiceCancelDelegationTokenArgs{} +} + +var TCLIServiceCancelDelegationTokenArgs_Req_DEFAULT *TCancelDelegationTokenReq +func (p *TCLIServiceCancelDelegationTokenArgs) GetReq() *TCancelDelegationTokenReq { + if !p.IsSetReq() { + return TCLIServiceCancelDelegationTokenArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceCancelDelegationTokenArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceCancelDelegationTokenArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TCancelDelegationTokenReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CancelDelegationToken_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceCancelDelegationTokenArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCancelDelegationTokenArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceCancelDelegationTokenResult struct { + Success *TCancelDelegationTokenResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceCancelDelegationTokenResult() *TCLIServiceCancelDelegationTokenResult { + return &TCLIServiceCancelDelegationTokenResult{} +} + +var TCLIServiceCancelDelegationTokenResult_Success_DEFAULT *TCancelDelegationTokenResp +func (p *TCLIServiceCancelDelegationTokenResult) GetSuccess() *TCancelDelegationTokenResp { + if !p.IsSetSuccess() { + return TCLIServiceCancelDelegationTokenResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceCancelDelegationTokenResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceCancelDelegationTokenResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TCancelDelegationTokenResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenResult) Write(ctx context.Context, oprot 
thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "CancelDelegationToken_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceCancelDelegationTokenResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceCancelDelegationTokenResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceCancelDelegationTokenResult(%+v)", *p) +} + +// Attributes: +// - Req +type TCLIServiceRenewDelegationTokenArgs struct { + Req *TRenewDelegationTokenReq `thrift:"req,1" db:"req" json:"req"` +} + +func NewTCLIServiceRenewDelegationTokenArgs() *TCLIServiceRenewDelegationTokenArgs { + return &TCLIServiceRenewDelegationTokenArgs{} +} + +var TCLIServiceRenewDelegationTokenArgs_Req_DEFAULT *TRenewDelegationTokenReq +func (p *TCLIServiceRenewDelegationTokenArgs) GetReq() *TRenewDelegationTokenReq { + if !p.IsSetReq() { + return TCLIServiceRenewDelegationTokenArgs_Req_DEFAULT + } +return p.Req +} +func (p *TCLIServiceRenewDelegationTokenArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TCLIServiceRenewDelegationTokenArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Req = &TRenewDelegationTokenReq{} + if err := p.Req.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "RenewDelegationToken_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + 
if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) } + if err := p.Req.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) } + return err +} + +func (p *TCLIServiceRenewDelegationTokenArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceRenewDelegationTokenArgs(%+v)", *p) +} + +// Attributes: +// - Success +type TCLIServiceRenewDelegationTokenResult struct { + Success *TRenewDelegationTokenResp `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewTCLIServiceRenewDelegationTokenResult() *TCLIServiceRenewDelegationTokenResult { + return &TCLIServiceRenewDelegationTokenResult{} +} + +var TCLIServiceRenewDelegationTokenResult_Success_DEFAULT *TRenewDelegationTokenResp +func (p *TCLIServiceRenewDelegationTokenResult) GetSuccess() *TRenewDelegationTokenResp { + if !p.IsSetSuccess() { + return TCLIServiceRenewDelegationTokenResult_Success_DEFAULT + } +return p.Success +} +func (p *TCLIServiceRenewDelegationTokenResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TCLIServiceRenewDelegationTokenResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &TRenewDelegationTokenResp{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "RenewDelegationToken_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return 
thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *TCLIServiceRenewDelegationTokenResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *TCLIServiceRenewDelegationTokenResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TCLIServiceRenewDelegationTokenResult(%+v)", *p) +} + + diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/client/client.go b/vendor/github.com/databricks/databricks-sql-go/internal/client/client.go new file mode 100644 index 00000000..febab52e --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/client/client.go @@ -0,0 +1,758 @@ +package client + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "log" + "math" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + + "github.com/apache/thrift/lib/go/thrift" + "github.com/databricks/databricks-sql-go/auth" + "github.com/databricks/databricks-sql-go/driverctx" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/config" + "github.com/databricks/databricks-sql-go/logger" + "github.com/hashicorp/go-retryablehttp" + "github.com/pkg/errors" +) + +// RecordResults is used to generate test data. 
+// RecordResults is used to generate test data. Developers must enable it manually.
+var RecordResults bool
+var resultIndex int
+
+type ThriftServiceClient struct {
+	*cli_service.TCLIServiceClient
+}
+
+type contextKey int
+
+const (
+	ClientMethod contextKey = iota
+)
+
+type clientMethod int
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=clientMethod -trimprefix=clientMethod
+
+const (
+	clientMethodUnknown clientMethod = iota
+	clientMethodOpenSession
+	clientMethodCloseSession
+	clientMethodFetchResults
+	clientMethodGetResultSetMetadata
+	clientMethodExecuteStatement
+	clientMethodGetOperationStatus
+	clientMethodCloseOperation
+	clientMethodCancelOperation
+)
+
+var nonRetryableClientMethods map[clientMethod]any = map[clientMethod]any{
+	clientMethodExecuteStatement: struct{}{},
+	clientMethodUnknown:          struct{}{},
+}
+
+var clientMethodRequestErrorMsgs map[clientMethod]string = map[clientMethod]string{
+	clientMethodOpenSession:          "open session request error",
+	clientMethodCloseSession:         "close session request error",
+	clientMethodFetchResults:         "fetch results request error",
+	clientMethodGetResultSetMetadata: "get result set metadata request error",
+	clientMethodExecuteStatement:     "execute statement request error",
+	clientMethodGetOperationStatus:   "get operation status request error",
+	clientMethodCloseOperation:       "close operation request error",
+	clientMethodCancelOperation:      "cancel operation request error",
+}
+
+// OpenSession is a wrapper around the thrift operation OpenSession
+// If RecordResults is true, the results will be marshalled to JSON format and written to OpenSession.json
+func (tsc *ThriftServiceClient) OpenSession(ctx context.Context, req *cli_service.TOpenSessionReq) (*cli_service.TOpenSessionResp, error) {
+	ctx = startClientMethod(ctx, clientMethodOpenSession)
+	var log *logger.DBSQLLogger
+	msg, start := logger.Track("OpenSession")
+
+	resp, err := tsc.TCLIServiceClient.OpenSession(ctx, req)
+	log, ctx = LoggerAndContext(ctx, resp)
+	logDisplayMessage(resp, log)
+	defer log.Duration(msg, start)
+	if err != nil {
+		err = handleClientMethodError(ctx, err)
+		return resp, err
+	}
+
+	recordResult(ctx, resp)
+
+	return resp, CheckStatus(resp)
+}
+
+// CloseSession is a wrapper around the thrift operation CloseSession
+// If RecordResults is true, the results will be marshalled to JSON format and written to CloseSession.json
+func (tsc *ThriftServiceClient) CloseSession(ctx context.Context, req *cli_service.TCloseSessionReq) (*cli_service.TCloseSessionResp, error) {
+	ctx = startClientMethod(ctx, clientMethodCloseSession)
+	var log *logger.DBSQLLogger
+	log, ctx = LoggerAndContext(ctx, req)
+	defer log.Duration(logger.Track("CloseSession"))
+
+	resp, err := tsc.TCLIServiceClient.CloseSession(ctx, req)
+	logDisplayMessage(resp, log)
+	if err != nil {
+		err = handleClientMethodError(ctx, err)
+		return resp, err
+	}
+
+	recordResult(ctx, resp)
+
+	return resp, CheckStatus(resp)
+}
+
+// FetchResults is a wrapper around the thrift operation FetchResults
+// If RecordResults is true, the results will be marshalled to JSON format and written to FetchResults.json
+func (tsc *ThriftServiceClient) FetchResults(ctx context.Context, req *cli_service.TFetchResultsReq) (*cli_service.TFetchResultsResp, error) {
+	ctx = startClientMethod(ctx, clientMethodFetchResults)
+	var log *logger.DBSQLLogger
+	log, ctx = LoggerAndContext(ctx, req)
+	defer log.Duration(logger.Track("FetchResults"))
+
+	resp, err := tsc.TCLIServiceClient.FetchResults(ctx, req)
+	logDisplayMessage(resp, log)
+	if err != nil {
+		err = handleClientMethodError(ctx, err)
+		return resp, err
+	}
+
+	recordResult(ctx, resp)
+
+	return resp, CheckStatus(resp)
+}
+
+// GetResultSetMetadata is a wrapper around the thrift operation GetResultSetMetadata
+// If RecordResults is true, the results will be marshalled to JSON format and written to GetResultSetMetadata.json
+func (tsc *ThriftServiceClient) GetResultSetMetadata(ctx context.Context, req *cli_service.TGetResultSetMetadataReq) (*cli_service.TGetResultSetMetadataResp, error) {
+	ctx = startClientMethod(ctx, clientMethodGetResultSetMetadata)
+	var log *logger.DBSQLLogger
+	log, ctx = LoggerAndContext(ctx, req)
+	defer log.Duration(logger.Track("GetResultSetMetadata"))
+
+	resp, err := tsc.TCLIServiceClient.GetResultSetMetadata(ctx, req)
+	logDisplayMessage(resp, log)
+	if err != nil {
+		err = handleClientMethodError(ctx, err)
+		return resp, err
+	}
+
+	recordResult(ctx, resp)
+
+	return resp, CheckStatus(resp)
+}
+
+// ExecuteStatement is a wrapper around the thrift operation ExecuteStatement
+// If RecordResults is true, the results will be marshalled to JSON format and written to ExecuteStatement.json
+func (tsc *ThriftServiceClient) ExecuteStatement(ctx context.Context, req *cli_service.TExecuteStatementReq) (*cli_service.TExecuteStatementResp, error) {
+	ctx = startClientMethod(ctx, clientMethodExecuteStatement)
+	var log *logger.DBSQLLogger
+	log, ctx = LoggerAndContext(ctx, req)
+	msg, start := log.Track("ExecuteStatement")
+
+	// We use context.Background to fix a problem where the query would not be cancelled once the caller's context was done.
+	resp, err := tsc.TCLIServiceClient.ExecuteStatement(context.Background(), req)
+	log, ctx = LoggerAndContext(ctx, resp)
+	logDisplayMessage(resp, log)
+	logExecStatementState(resp, log)
+
+	defer log.Duration(msg, start)
+	if err != nil {
+		err = handleClientMethodError(ctx, err)
+		return resp, err
+	}
+
+	return resp, CheckStatus(resp)
+}
+
+// GetOperationStatus is a wrapper around the thrift operation GetOperationStatus
+// If RecordResults is true, the results will be marshalled to JSON format and written to GetOperationStatus.json
+func (tsc *ThriftServiceClient) GetOperationStatus(ctx context.Context, req *cli_service.TGetOperationStatusReq) (*cli_service.TGetOperationStatusResp, error) {
+	ctx = startClientMethod(ctx, clientMethodGetOperationStatus)
+	var log *logger.DBSQLLogger
+	log, ctx = LoggerAndContext(ctx, req)
+	defer log.Duration(logger.Track("GetOperationStatus"))
+
+	resp, err := tsc.TCLIServiceClient.GetOperationStatus(ctx, req)
+	logDisplayMessage(resp, log)
+	if err != nil {
+		err = handleClientMethodError(driverctx.NewContextWithQueryId(ctx, SprintGuid(req.OperationHandle.OperationId.GUID)), err)
+		return resp, err
+	}
+
+	recordResult(ctx, resp)
+
+	return resp, CheckStatus(resp)
+}
+
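Because `ExecuteStatement` above deliberately runs on `context.Background`, caller cancellation has to be honored as a separate, explicit step rather than by aborting the in-flight request. A hedged sketch of such a step is below; `watchCancel` is a hypothetical helper, and it assumes `TCancelOperationReq` carries the operation handle as in the Hive thrift IDL.

```go
// watchCancel is a hypothetical caller-side watchdog: if the caller's context
// ends while the statement is still running, it issues an explicit
// CancelOperation on a fresh context so the server-side query is stopped.
func watchCancel(ctx context.Context, tsc *ThriftServiceClient, done <-chan struct{}, handle *cli_service.TOperationHandle) {
	go func() {
		select {
		case <-ctx.Done():
			// The caller's context is already cancelled, so use a fresh one.
			_, _ = tsc.CancelOperation(context.Background(), &cli_service.TCancelOperationReq{
				OperationHandle: handle,
			})
		case <-done:
			// The statement finished normally; nothing to cancel.
		}
	}()
}
```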
CheckStatus(resp) +} + +// CancelOperation is a wrapper around the thrift operation CancelOperation +// If RecordResults is true, the results will be marshalled to JSON format and written to CancelOperation.json +func (tsc *ThriftServiceClient) CancelOperation(ctx context.Context, req *cli_service.TCancelOperationReq) (*cli_service.TCancelOperationResp, error) { + ctx = startClientMethod(ctx, clientMethodCancelOperation) + var log *logger.DBSQLLogger + log, ctx = LoggerAndContext(ctx, req) + defer log.Duration(logger.Track("CancelOperation")) + + resp, err := tsc.TCLIServiceClient.CancelOperation(ctx, req) + logDisplayMessage(resp, log) + if err != nil { + err = handleClientMethodError(ctx, err) + return resp, err + } + + recordResult(ctx, resp) + + return resp, CheckStatus(resp) +} + +// InitThriftClient is a wrapper of the http transport, so we can have access to response code and headers. +// It is important to know the code and headers to know if we need to retry or not +func InitThriftClient(cfg *config.Config, httpclient *http.Client) (*ThriftServiceClient, error) { + var err error + endpoint, err := cfg.ToEndpointURL() + if err != nil { + return nil, err + } + tcfg := &thrift.TConfiguration{ + TLSConfig: cfg.TLSConfig, + } + + var protocolFactory thrift.TProtocolFactory + switch cfg.ThriftProtocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactoryConf(tcfg) + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactoryConf(tcfg) + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + case "binary": + protocolFactory = thrift.NewTBinaryProtocolFactoryConf(tcfg) + case "header": + protocolFactory = thrift.NewTHeaderProtocolFactoryConf(tcfg) + default: + return nil, dbsqlerrint.NewRequestError(context.TODO(), fmt.Sprintf("invalid protocol specified %s", cfg.ThriftProtocol), nil) + } + if cfg.ThriftDebugClientProtocol { + protocolFactory = thrift.NewTDebugProtocolFactoryWithLogger(protocolFactory, "client:", thrift.StdLogger(nil)) + } + + var tTrans thrift.TTransport + + switch cfg.ThriftTransport { + case "http": + if httpclient == nil { + if cfg.Authenticator == nil { + return nil, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrNoDefaultAuthenticator, nil) + } + httpclient = RetryableClient(cfg) + } + + tTrans, err = thrift.NewTHttpClientWithOptions(endpoint, thrift.THttpClientOptions{Client: httpclient}) + + thriftHttpClient := tTrans.(*thrift.THttpClient) + userAgent := fmt.Sprintf("%s/%s", cfg.DriverName, cfg.DriverVersion) + if cfg.UserAgentEntry != "" { + userAgent = fmt.Sprintf("%s/%s (%s)", cfg.DriverName, cfg.DriverVersion, cfg.UserAgentEntry) + } + thriftHttpClient.SetHeader("User-Agent", userAgent) + + default: + return nil, dbsqlerrint.NewDriverError(context.TODO(), fmt.Sprintf("unsupported transport `%s`", cfg.ThriftTransport), nil) + } + if err != nil { + return nil, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrInvalidURL, err) + } + if err = tTrans.Open(); err != nil { + return nil, dbsqlerrint.NewRequestError(context.TODO(), fmt.Sprintf("failed to open http transport for endpoint %s", endpoint), err) + } + iprot := protocolFactory.GetProtocol(tTrans) + oprot := protocolFactory.GetProtocol(tTrans) + tclient := cli_service.NewTCLIServiceClient(thrift.NewTStandardClient(iprot, oprot)) + tsClient := &ThriftServiceClient{tclient} + return tsClient, nil +} + +func startClientMethod(ctx context.Context, method clientMethod) context.Context { + ctx = context.WithValue(ctx, ClientMethod, method) + log, _ := 
LoggerAndContext(ctx, nil)
+	log.Debug().Msgf("client.%s", method.String())
+	return ctx
+}
+
+// handleClientMethodError is the handler for errors returned by the thrift client methods
+func handleClientMethodError(ctx context.Context, err error) dbsqlerr.DBRequestError {
+	if err == nil {
+		return nil
+	}
+
+	// If the passed error indicates an invalid session we inject a bad connection error
+	// into the error stack. This allows the sql package to retry with a new connection.
+	s := err.Error()
+	if strings.Contains(s, "Invalid SessionHandle") {
+		err = dbsqlerrint.NewBadConnectionError(err)
+	}
+
+	// the passed error will be wrapped in a DBRequestError
+	method := getClientMethod(ctx)
+	msg := clientMethodRequestErrorMsgs[method]
+
+	dbErr := dbsqlerrint.NewRequestError(ctx, msg, err)
+
+	log, _ := LoggerAndContext(ctx, nil)
+
+	log.Err(err).Msg("")
+
+	return dbErr
+}
+
+// Extract a clientMethod value from the given Context.
+func getClientMethod(ctx context.Context) clientMethod {
+	v, _ := ctx.Value(ClientMethod).(clientMethod)
+	return v
+}
+
+// recordResult writes the response to a numbered JSON file when RecordResults is enabled
+func recordResult(ctx context.Context, resp any) {
+	if RecordResults && resp != nil {
+		method := getClientMethod(ctx)
+		j, _ := json.MarshalIndent(resp, "", " ")
+		_ = os.WriteFile(fmt.Sprintf("%s%d.json", method, resultIndex), j, 0600)
+		resultIndex++
+	}
+}
+
+// ThriftResponse represents the thrift rpc response
+type ThriftResponse interface {
+	GetStatus() *cli_service.TStatus
+}
+
+// CheckStatus checks the status code after a thrift operation.
+// Returns nil if the operation is successful or still executing, otherwise returns an error.
+func CheckStatus(resp interface{}) error {
+	rpcresp, ok := resp.(ThriftResponse)
+	if ok {
+		status := rpcresp.GetStatus()
+		if status.StatusCode == cli_service.TStatusCode_ERROR_STATUS {
+			return errors.New(status.GetErrorMessage())
+		}
+		if status.StatusCode == cli_service.TStatusCode_INVALID_HANDLE_STATUS {
+			return errors.New("thrift: invalid handle")
+		}
+
+		// SUCCESS, SUCCESS_WITH_INFO, STILL_EXECUTING are ok
+		return nil
+	}
+	return errors.New("thrift: invalid response")
+}
+
+// SprintGuid is a convenience function to format a byte array as a GUID string.
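+// For example (illustrative values only), the 16 bytes 0x01 through 0x10 are rendered
+// as "01020304-0506-0708-090a-0b0c0d0e0f10"; byte slices that are not exactly 16 bytes
+// long are logged as invalid and formatted as plain hex instead.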
+func SprintGuid(bts []byte) string { + if len(bts) == 16 { + return fmt.Sprintf("%x-%x-%x-%x-%x", bts[0:4], bts[4:6], bts[6:8], bts[8:10], bts[10:16]) + } + logger.Warn().Msgf("GUID not valid: %x", bts) + return fmt.Sprintf("%x", bts) +} + +// Create an updated context and a logger that includes query and connection id +func LoggerAndContext(ctx context.Context, c any) (*logger.DBSQLLogger, context.Context) { + connId := driverctx.ConnIdFromContext(ctx) + corrId := driverctx.CorrelationIdFromContext(ctx) + queryId := driverctx.QueryIdFromContext(ctx) + if connId == "" { + connId = guidFromHasSessionHandle(c) + ctx = driverctx.NewContextWithConnId(ctx, connId) + } + if queryId == "" { + queryId = guidFromHasOpHandle(c) + ctx = driverctx.NewContextWithQueryId(ctx, queryId) + } + log := logger.WithContext(connId, corrId, queryId) + + return log, ctx +} + +type hasOpHandle interface { + GetOperationHandle() *cli_service.TOperationHandle +} +type hasSessionHandle interface { + GetSessionHandle() *cli_service.TSessionHandle +} + +func guidFromHasOpHandle(c any) (guid string) { + if c == nil || reflect.ValueOf(c).IsNil() { + return + } + if ho, ok := c.(hasOpHandle); ok { + opHandle := ho.GetOperationHandle() + if opHandle != nil && opHandle.OperationId != nil && opHandle.OperationId.GUID != nil { + guid = SprintGuid(opHandle.OperationId.GUID) + } + } + return +} + +func guidFromHasSessionHandle(c any) (guid string) { + if c == nil || reflect.ValueOf(c).IsNil() { + return + } + if ho, ok := c.(hasSessionHandle); ok { + sessionHandle := ho.GetSessionHandle() + if sessionHandle != nil && sessionHandle.SessionId != nil && sessionHandle.SessionId.GUID != nil { + guid = SprintGuid(sessionHandle.SessionId.GUID) + } + } + return +} + +func logExecStatementState(resp *cli_service.TExecuteStatementResp, log *logger.DBSQLLogger) { + if resp != nil { + if resp.DirectResults != nil { + state := resp.DirectResults.GetOperationStatus().GetOperationState() + log.Debug().Msgf("execute statement state: %s", state) + status := resp.DirectResults.GetOperationStatus().GetStatus().StatusCode + log.Debug().Msgf("execute statement status: %s", status) + logDisplayMessage(resp.DirectResults, log) + } else { + status := resp.GetStatus().StatusCode + log.Debug().Msgf("execute statement status: %s", status) + } + } +} + +type hasGetStatus interface{ GetStatus() *cli_service.TStatus } +type hasGetDisplayMessage interface{ GetDisplayMessage() string } +type hasGetOperationStatus interface { + GetOperationStatus() *cli_service.TGetOperationStatusResp +} + +func logDisplayMessage(c any, log *logger.DBSQLLogger) { + if c == nil || reflect.ValueOf(c).IsNil() { + return + } + + if hd, ok := c.(hasGetDisplayMessage); ok { + dm := hd.GetDisplayMessage() + if dm != "" { + log.Debug().Msg(dm) + } + } else if gs, ok := c.(hasGetStatus); ok { + logDisplayMessage(gs.GetStatus(), log) + } else if gos, ok := c.(hasGetOperationStatus); ok { + logDisplayMessage(gos.GetOperationStatus(), log) + } +} + +var retryableStatusCodes = map[int]any{http.StatusTooManyRequests: struct{}{}, http.StatusServiceUnavailable: struct{}{}} + +func isRetryableServerResponse(resp *http.Response) bool { + if resp == nil { + return false + } + + _, ok := retryableStatusCodes[resp.StatusCode] + return ok +} + +type Transport struct { + Base http.RoundTripper + Authr auth.Authenticator + trace bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.trace { + trace := &httptrace.ClientTrace{ + GotConn: func(info 
httptrace.GotConnInfo) { log.Printf("conn was reused: %t", info.Reused) }, + } + req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) + } + + defer logger.Duration(logger.Track("RoundTrip")) + // this is inspired by oauth2.Transport + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + + req2 := cloneRequest(req) // per RoundTripper contract + + err := t.Authr.Authenticate(req2) + + if err != nil { + return nil, err + } + + // req.Body is assumed to be closed by the base RoundTripper. + reqBodyClosed = true + resp, err := t.Base.RoundTrip(req2) + + return resp, err +} + +func RetryableClient(cfg *config.Config) *http.Client { + httpclient := PooledClient(cfg) + retryableClient := &retryablehttp.Client{ + HTTPClient: httpclient, + Logger: &leveledLogger{}, + RetryWaitMin: cfg.RetryWaitMin, + RetryWaitMax: cfg.RetryWaitMax, + RetryMax: cfg.RetryMax, + ErrorHandler: errorHandler, + CheckRetry: RetryPolicy, + Backoff: backoff, + } + return retryableClient.StandardClient() +} + +func PooledTransport(cfg *config.Config) *http.Transport { + var tlsConfig *tls.Config + if (cfg.TLSConfig != nil) && cfg.TLSConfig.InsecureSkipVerify { + tlsConfig = cfg.TLSConfig + } + + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + TLSClientConfig: tlsConfig, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 180 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: 10, // this client is only used for one host + MaxConnsPerHost: 100, + } + return transport +} + +func PooledClient(cfg *config.Config) *http.Client { + if cfg.Authenticator == nil { + return nil + } + + var tr *Transport + if cfg.Transport != nil { + tr = &Transport{ + Base: cfg.Transport, + Authr: cfg.Authenticator, + } + } else { + tr = &Transport{ + Base: PooledTransport(cfg), + Authr: cfg.Authenticator, + } + } + + return &http.Client{ + Transport: tr, + Timeout: cfg.ClientTimeout, + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type leveledLogger struct { +} + +func (l *leveledLogger) Error(msg string, keysAndValues ...interface{}) { + logger.Error().Msg(msg) +} +func (l *leveledLogger) Info(msg string, keysAndValues ...interface{}) { + logger.Info().Msg(msg) +} +func (l *leveledLogger) Debug(msg string, keysAndValues ...interface{}) { + logger.Debug().Msg(msg) +} +func (l *leveledLogger) Warn(msg string, keysAndValues ...interface{}) { + logger.Warn().Msg(msg) +} + +func errorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { + var werr error + msg := fmt.Sprintf("request error after %d attempt(s)", numTries) + if err == nil { + werr = errors.New(msg) + } else { + werr = errors.Wrap(err, msg) + } + + if resp != nil { + if resp.Header != nil { + reason := resp.Header.Get("X-Databricks-Reason-Phrase") + terrmsg := resp.Header.Get("X-Thriftserver-Error-Message") + + if reason != "" { + werr = dbsqlerrint.WrapErr(werr, reason) + } else if terrmsg != "" { + werr = dbsqlerrint.WrapErr(werr, terrmsg) + } + } + + logger.Err(werr).Msg(resp.Status) + } + + return resp, werr +} + +var ( + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. + notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) + + errorRes = []*regexp.Regexp{redirectsErrorRe, schemeErrorRe, notTrustedErrorRe} +) + +func RetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + caller := getClientMethod(ctx) + _, nonRetryableClientMethod := nonRetryableClientMethods[caller] + + if err != nil { + if isRetryableError(err) && !nonRetryableClientMethod { + return true, nil + } + + return false, err + } + + // shouldn't retry on no response or success + if resp == nil || resp.StatusCode == http.StatusOK { + return false, nil + } + + checkErr := fmt.Errorf("unexpected HTTP status %s", resp.Status) + + // 429 Too Many Requests or 503 service unavailable is recoverable. Sometimes the server puts + // a Retry-After response header to indicate when the server is + // available to start processing request from client. + if isRetryableServerResponse(resp) { + var retryAfter string + if resp.Header != nil { + retryAfter = resp.Header.Get("Retry-After") + } + + return true, dbsqlerrint.NewRetryableError(checkErr, retryAfter) + } + + if !nonRetryableClientMethod && (resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented)) { + return true, checkErr + } + + // checkErr will be non-nil if the response code was not StatusOK. + // Returning it here ensures that the error handler will be called. 
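+	// To summarize the policy above (informal sketch, not upstream documentation):
+	// context errors never retry; transport-level errors retry unless the method is
+	// non-retryable (ExecuteStatement); 429/503 responses retry and honour Retry-After;
+	// other 5xx responses (except 501) retry; anything else fails fast via the error handler.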
+ return false, checkErr +} + +func isRetryableError(err error) bool { + if err == nil { + return false + } + + if v, ok := err.(*url.Error); ok { + s := v.Error() + for _, re := range errorRes { + if re.MatchString(s) { + return false + } + } + + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false + } + } + + // The error is likely recoverable so retry. + return true +} + +func backoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // honour the Retry-After header + if resp != nil && resp.Header != nil { + if s, ok := resp.Header["Retry-After"]; ok { + if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil { + return time.Second * time.Duration(sleep) + } + } + } + + // exponential backoff + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/client/clientmethod_string.go b/vendor/github.com/databricks/databricks-sql-go/internal/client/clientmethod_string.go new file mode 100644 index 00000000..0e284f07 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/client/clientmethod_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=clientMethod -trimprefix=clientMethod"; DO NOT EDIT. + +package client + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[clientMethodUnknown-0] + _ = x[clientMethodOpenSession-1] + _ = x[clientMethodCloseSession-2] + _ = x[clientMethodFetchResults-3] + _ = x[clientMethodGetResultSetMetadata-4] + _ = x[clientMethodExecuteStatement-5] + _ = x[clientMethodGetOperationStatus-6] + _ = x[clientMethodCloseOperation-7] + _ = x[clientMethodCancelOperation-8] +} + +const _clientMethod_name = "UnknownOpenSessionCloseSessionFetchResultsGetResultSetMetadataExecuteStatementGetOperationStatusCloseOperationCancelOperation" + +var _clientMethod_index = [...]uint8{0, 7, 18, 30, 42, 62, 78, 96, 110, 125} + +func (i clientMethod) String() string { + if i < 0 || i >= clientMethod(len(_clientMethod_index)-1) { + return "clientMethod(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _clientMethod_name[_clientMethod_index[i]:_clientMethod_index[i+1]] +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/client/testclient.go b/vendor/github.com/databricks/databricks-sql-go/internal/client/testclient.go new file mode 100644 index 00000000..cc0986e8 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/client/testclient.go @@ -0,0 +1,163 @@ +package client + +import ( + "context" + "errors" + + "github.com/databricks/databricks-sql-go/internal/cli_service" +) + +var ErrNotImplemented = errors.New("databricks: not implemented") + +type TestClient struct { + FnOpenSession func(ctx context.Context, req *cli_service.TOpenSessionReq) (_r *cli_service.TOpenSessionResp, _err error) + FnCloseSession func(ctx context.Context, req *cli_service.TCloseSessionReq) (_r *cli_service.TCloseSessionResp, _err error) + FnGetInfo func(ctx context.Context, req *cli_service.TGetInfoReq) (_r *cli_service.TGetInfoResp, _err error) + FnExecuteStatement func(ctx context.Context, req *cli_service.TExecuteStatementReq) (_r *cli_service.TExecuteStatementResp, _err error) + FnGetTypeInfo func(ctx context.Context, req *cli_service.TGetTypeInfoReq) (_r 
*cli_service.TGetTypeInfoResp, _err error) + FnGetCatalogs func(ctx context.Context, req *cli_service.TGetCatalogsReq) (_r *cli_service.TGetCatalogsResp, _err error) + FnGetSchemas func(ctx context.Context, req *cli_service.TGetSchemasReq) (_r *cli_service.TGetSchemasResp, _err error) + FnGetTables func(ctx context.Context, req *cli_service.TGetTablesReq) (_r *cli_service.TGetTablesResp, _err error) + FnGetTableTypes func(ctx context.Context, req *cli_service.TGetTableTypesReq) (_r *cli_service.TGetTableTypesResp, _err error) + FnGetColumns func(ctx context.Context, req *cli_service.TGetColumnsReq) (_r *cli_service.TGetColumnsResp, _err error) + FnGetFunctions func(ctx context.Context, req *cli_service.TGetFunctionsReq) (_r *cli_service.TGetFunctionsResp, _err error) + FnGetPrimaryKeys func(ctx context.Context, req *cli_service.TGetPrimaryKeysReq) (_r *cli_service.TGetPrimaryKeysResp, _err error) + FnGetCrossReference func(ctx context.Context, req *cli_service.TGetCrossReferenceReq) (_r *cli_service.TGetCrossReferenceResp, _err error) + FnGetOperationStatus func(ctx context.Context, req *cli_service.TGetOperationStatusReq) (_r *cli_service.TGetOperationStatusResp, _err error) + FnCancelOperation func(ctx context.Context, req *cli_service.TCancelOperationReq) (_r *cli_service.TCancelOperationResp, _err error) + FnCloseOperation func(ctx context.Context, req *cli_service.TCloseOperationReq) (_r *cli_service.TCloseOperationResp, _err error) + FnGetResultSetMetadata func(ctx context.Context, req *cli_service.TGetResultSetMetadataReq) (_r *cli_service.TGetResultSetMetadataResp, _err error) + FnFetchResults func(ctx context.Context, req *cli_service.TFetchResultsReq) (_r *cli_service.TFetchResultsResp, _err error) + FnGetDelegationToken func(ctx context.Context, req *cli_service.TGetDelegationTokenReq) (_r *cli_service.TGetDelegationTokenResp, _err error) + FnCancelDelegationToken func(ctx context.Context, req *cli_service.TCancelDelegationTokenReq) (_r *cli_service.TCancelDelegationTokenResp, _err error) + FnRenewDelegationToken func(ctx context.Context, req *cli_service.TRenewDelegationTokenReq) (_r *cli_service.TRenewDelegationTokenResp, _err error) +} + +var _ cli_service.TCLIService = (*TestClient)(nil) + +func (c *TestClient) OpenSession(ctx context.Context, req *cli_service.TOpenSessionReq) (_r *cli_service.TOpenSessionResp, _err error) { + if c.FnOpenSession != nil { + return c.FnOpenSession(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) CloseSession(ctx context.Context, req *cli_service.TCloseSessionReq) (_r *cli_service.TCloseSessionResp, _err error) { + if c.FnCloseSession != nil { + return c.FnCloseSession(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetInfo(ctx context.Context, req *cli_service.TGetInfoReq) (_r *cli_service.TGetInfoResp, _err error) { + if c.FnGetInfo != nil { + return c.FnGetInfo(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) ExecuteStatement(ctx context.Context, req *cli_service.TExecuteStatementReq) (_r *cli_service.TExecuteStatementResp, _err error) { + if c.FnExecuteStatement != nil { + return c.FnExecuteStatement(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetTypeInfo(ctx context.Context, req *cli_service.TGetTypeInfoReq) (_r *cli_service.TGetTypeInfoResp, _err error) { + if c.FnGetTypeInfo != nil { + return c.FnGetTypeInfo(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetCatalogs(ctx context.Context, req *cli_service.TGetCatalogsReq) 
(_r *cli_service.TGetCatalogsResp, _err error) { + if c.FnGetCatalogs != nil { + return c.FnGetCatalogs(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetSchemas(ctx context.Context, req *cli_service.TGetSchemasReq) (_r *cli_service.TGetSchemasResp, _err error) { + if c.FnGetSchemas != nil { + return c.FnGetSchemas(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetTables(ctx context.Context, req *cli_service.TGetTablesReq) (_r *cli_service.TGetTablesResp, _err error) { + if c.FnGetTables != nil { + return c.FnGetTables(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetTableTypes(ctx context.Context, req *cli_service.TGetTableTypesReq) (_r *cli_service.TGetTableTypesResp, _err error) { + if c.FnGetTableTypes != nil { + return c.FnGetTableTypes(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetColumns(ctx context.Context, req *cli_service.TGetColumnsReq) (_r *cli_service.TGetColumnsResp, _err error) { + if c.FnGetColumns != nil { + return c.FnGetColumns(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetFunctions(ctx context.Context, req *cli_service.TGetFunctionsReq) (_r *cli_service.TGetFunctionsResp, _err error) { + if c.FnGetFunctions != nil { + return c.FnGetFunctions(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetPrimaryKeys(ctx context.Context, req *cli_service.TGetPrimaryKeysReq) (_r *cli_service.TGetPrimaryKeysResp, _err error) { + if c.FnGetPrimaryKeys != nil { + return c.FnGetPrimaryKeys(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetCrossReference(ctx context.Context, req *cli_service.TGetCrossReferenceReq) (_r *cli_service.TGetCrossReferenceResp, _err error) { + if c.FnGetCrossReference != nil { + return c.FnGetCrossReference(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetOperationStatus(ctx context.Context, req *cli_service.TGetOperationStatusReq) (_r *cli_service.TGetOperationStatusResp, _err error) { + if c.FnGetOperationStatus != nil { + return c.FnGetOperationStatus(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) CancelOperation(ctx context.Context, req *cli_service.TCancelOperationReq) (_r *cli_service.TCancelOperationResp, _err error) { + if c.FnCancelOperation != nil { + return c.FnCancelOperation(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) CloseOperation(ctx context.Context, req *cli_service.TCloseOperationReq) (_r *cli_service.TCloseOperationResp, _err error) { + if c.FnCloseOperation != nil { + return c.FnCloseOperation(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetResultSetMetadata(ctx context.Context, req *cli_service.TGetResultSetMetadataReq) (_r *cli_service.TGetResultSetMetadataResp, _err error) { + if c.FnGetResultSetMetadata != nil { + return c.FnGetResultSetMetadata(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) FetchResults(ctx context.Context, req *cli_service.TFetchResultsReq) (_r *cli_service.TFetchResultsResp, _err error) { + if c.FnFetchResults != nil { + return c.FnFetchResults(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) GetDelegationToken(ctx context.Context, req *cli_service.TGetDelegationTokenReq) (_r *cli_service.TGetDelegationTokenResp, _err error) { + if c.FnGetDelegationToken != nil { + return c.FnGetDelegationToken(ctx, req) + } + return nil, ErrNotImplemented +} +func (c *TestClient) CancelDelegationToken(ctx 
context.Context, req *cli_service.TCancelDelegationTokenReq) (_r *cli_service.TCancelDelegationTokenResp, _err error) {
+	if c.FnCancelDelegationToken != nil {
+		return c.FnCancelDelegationToken(ctx, req)
+	}
+	return nil, ErrNotImplemented
+}
+func (c *TestClient) RenewDelegationToken(ctx context.Context, req *cli_service.TRenewDelegationTokenReq) (_r *cli_service.TRenewDelegationTokenResp, _err error) {
+	if c.FnRenewDelegationToken != nil {
+		return c.FnRenewDelegationToken(ctx, req)
+	}
+	return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/config/config.go b/vendor/github.com/databricks/databricks-sql-go/internal/config/config.go
new file mode 100644
index 00000000..d5ede1c7
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/config/config.go
@@ -0,0 +1,498 @@
+package config
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	dbsqlerr "github.com/databricks/databricks-sql-go/errors"
+	"github.com/pkg/errors"
+
+	"github.com/databricks/databricks-sql-go/auth"
+	"github.com/databricks/databricks-sql-go/auth/noop"
+	"github.com/databricks/databricks-sql-go/auth/oauth/m2m"
+	"github.com/databricks/databricks-sql-go/auth/oauth/u2m"
+	"github.com/databricks/databricks-sql-go/auth/pat"
+	"github.com/databricks/databricks-sql-go/internal/cli_service"
+	dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors"
+	"github.com/databricks/databricks-sql-go/logger"
+)
+
+// Driver Configurations.
+// Only UserConfig is currently exposed to users
+type Config struct {
+	UserConfig
+	TLSConfig *tls.Config // nil disables TLS
+	ArrowConfig
+	PollInterval time.Duration
+	ClientTimeout time.Duration // max time the http request can last
+	PingTimeout time.Duration // max time allowed for ping
+	CanUseMultipleCatalogs bool
+	DriverName string
+	DriverVersion string
+	ThriftProtocol string
+	ThriftTransport string
+	ThriftProtocolVersion cli_service.TProtocolVersion
+	ThriftDebugClientProtocol bool
+}
+
+// ToEndpointURL generates the endpoint URL from Config that a Thrift client will connect to
+func (c *Config) ToEndpointURL() (string, error) {
+	var userInfo string
+	endpointUrl := fmt.Sprintf("%s://%s%s:%d%s", c.Protocol, userInfo, c.Host, c.Port, c.HTTPPath)
+	if c.Host == "" {
+		return endpointUrl, errors.New("databricks: missing Hostname")
+	}
+	if c.Port == 0 {
+		return endpointUrl, errors.New("databricks: missing Port")
+	}
+	if c.HTTPPath == "" && c.Host != "localhost" {
+		return endpointUrl, errors.New("databricks: missing HTTP Path")
+	}
+	return endpointUrl, nil
+}
+
+// DeepCopy returns a true deep copy of Config
+func (c *Config) DeepCopy() *Config {
+	if c == nil {
+		return nil
+	}
+
+	return &Config{
+		UserConfig: c.UserConfig.DeepCopy(),
+		TLSConfig: c.TLSConfig.Clone(),
+		ArrowConfig: c.ArrowConfig.DeepCopy(),
+		PollInterval: c.PollInterval,
+		ClientTimeout: c.ClientTimeout,
+		PingTimeout: c.PingTimeout,
+		CanUseMultipleCatalogs: c.CanUseMultipleCatalogs,
+		DriverName: c.DriverName,
+		DriverVersion: c.DriverVersion,
+		ThriftProtocol: c.ThriftProtocol,
+		ThriftTransport: c.ThriftTransport,
+		ThriftProtocolVersion: c.ThriftProtocolVersion,
+		ThriftDebugClientProtocol: c.ThriftDebugClientProtocol,
+	}
+}
+
+// UserConfig is the set of configurations exposed to users
+type UserConfig struct {
+	Protocol string
+	Host string // from databricks UI
+	Port int // from databricks UI
+	HTTPPath string // from databricks UI
+	Catalog string
+	Schema string
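+	// Authenticator performs per-request authentication (PAT or OAuth M2M/U2M as
+	// configured below); when unset, WithDefaults falls back to the no-op authenticator.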
+ Authenticator auth.Authenticator + AccessToken string // from databricks UI + MaxRows int // max rows per page + QueryTimeout time.Duration // Timeout passed to server for query processing + UserAgentEntry string + Location *time.Location + SessionParams map[string]string + RetryWaitMin time.Duration + RetryWaitMax time.Duration + RetryMax int + Transport http.RoundTripper + UseLz4Compression bool + CloudFetchConfig +} + +// DeepCopy returns a true deep copy of UserConfig +func (ucfg UserConfig) DeepCopy() UserConfig { + var sessionParams map[string]string + if ucfg.SessionParams != nil { + sessionParams = make(map[string]string) + for k, v := range ucfg.SessionParams { + sessionParams[k] = v + } + } + var loccp *time.Location + if ucfg.Location != nil { + var err error + loccp, err = time.LoadLocation(ucfg.Location.String()) + if err != nil { + logger.Warn().Msg("could not copy location") + } + + } + + return UserConfig{ + Protocol: ucfg.Protocol, + Host: ucfg.Host, + Port: ucfg.Port, + HTTPPath: ucfg.HTTPPath, + Catalog: ucfg.Catalog, + Schema: ucfg.Schema, + Authenticator: ucfg.Authenticator, + AccessToken: ucfg.AccessToken, + MaxRows: ucfg.MaxRows, + QueryTimeout: ucfg.QueryTimeout, + UserAgentEntry: ucfg.UserAgentEntry, + Location: loccp, + SessionParams: sessionParams, + RetryWaitMin: ucfg.RetryWaitMin, + RetryWaitMax: ucfg.RetryWaitMax, + RetryMax: ucfg.RetryMax, + Transport: ucfg.Transport, + UseLz4Compression: ucfg.UseLz4Compression, + CloudFetchConfig: ucfg.CloudFetchConfig, + } +} + +var defaultMaxRows = 100000 + +// WithDefaults provides default settings for optional fields in UserConfig +func (ucfg UserConfig) WithDefaults() UserConfig { + if ucfg.MaxRows <= 0 { + ucfg.MaxRows = defaultMaxRows + } + if ucfg.Protocol == "" { + ucfg.Protocol = "https" + ucfg.Port = 443 + } + if ucfg.Port == 0 { + ucfg.Port = 443 + } + if ucfg.Authenticator == nil { + ucfg.Authenticator = &noop.NoopAuth{} + } + if ucfg.SessionParams == nil { + ucfg.SessionParams = make(map[string]string) + } + if ucfg.RetryMax == 0 { + ucfg.RetryMax = 4 + } + if ucfg.RetryWaitMin == 0 { + ucfg.RetryWaitMin = 1 * time.Second + } + if ucfg.RetryWaitMax == 0 { + ucfg.RetryWaitMax = 30 * time.Second + } + ucfg.UseLz4Compression = false + ucfg.CloudFetchConfig = CloudFetchConfig{}.WithDefaults() + + return ucfg +} + +// WithDefaults provides default settings for Config +func WithDefaults() *Config { + return &Config{ + UserConfig: UserConfig{}.WithDefaults(), + TLSConfig: &tls.Config{MinVersion: tls.VersionTLS12}, + ArrowConfig: ArrowConfig{}.WithDefaults(), + PollInterval: 1 * time.Second, + ClientTimeout: 900 * time.Second, + PingTimeout: 60 * time.Second, + CanUseMultipleCatalogs: true, + DriverName: "godatabrickssqlconnector", // important. 
Do not change + ThriftProtocol: "binary", + ThriftTransport: "http", + ThriftProtocolVersion: cli_service.TProtocolVersion_SPARK_CLI_SERVICE_PROTOCOL_V8, + ThriftDebugClientProtocol: false, + } + +} + +// ParseDSN constructs UserConfig and CloudFetchConfig by parsing DSN string supplied to `sql.Open()` +func ParseDSN(dsn string) (UserConfig, error) { + fullDSN := dsn + if !strings.HasPrefix(dsn, "https://") && !strings.HasPrefix(dsn, "http://") { + fullDSN = "https://" + dsn + } + parsedURL, err := url.Parse(fullDSN) + if err != nil { + return UserConfig{}, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrInvalidDSNFormat, err) + } + ucfg := UserConfig{}.WithDefaults() + ucfg.Protocol = parsedURL.Scheme + ucfg.Host = parsedURL.Hostname() + port, err := strconv.Atoi(parsedURL.Port()) + if err != nil { + return UserConfig{}, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrInvalidDSNPort, err) + } + ucfg.Port = port + + ucfg.HTTPPath = parsedURL.Path + + // Any params that are not specifically handled are assumed to be session params. + // Use extractableParams so that the processed values are deleted as we go. + params := &extractableParams{Values: parsedURL.Query()} + + // Create an authenticator based on the url and params + err = makeAuthenticator(parsedURL, params, &ucfg) + if err != nil { + return UserConfig{}, err + } + + if maxRows, ok, err := params.extractAsInt("maxRows"); ok { + if err != nil { + return UserConfig{}, err + } + if maxRows > 0 { + ucfg.MaxRows = maxRows + } + } + + if timeoutSeconds, ok, err := params.extractAsInt("timeout"); ok { + if err != nil { + return UserConfig{}, err + } + ucfg.QueryTimeout = time.Duration(timeoutSeconds) * time.Second + } + + if catalog, ok := params.extract("catalog"); ok { + ucfg.Catalog = catalog + } + if userAgent, ok := params.extract("userAgentEntry"); ok { + ucfg.UserAgentEntry = userAgent + params.Del("userAgentEntry") + } + if schema, ok := params.extract("schema"); ok { + ucfg.Schema = schema + } + + // Cloud Fetch parameters + if useCloudFetch, ok, err := params.extractAsBool("useCloudFetch"); ok { + if err != nil { + return UserConfig{}, err + } + ucfg.UseCloudFetch = useCloudFetch + } + + if numThreads, ok, err := params.extractAsInt("maxDownloadThreads"); ok { + if err != nil { + return UserConfig{}, err + } + ucfg.MaxDownloadThreads = numThreads + } + + // for timezone we do a case insensitive key match. + // We use getNoCase because we want to leave timezone in the params so that it will also + // be used as a session param. 
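+	// For example (illustrative DSN, placeholder values):
+	//
+	//	token:MY_PAT@myworkspace.cloud.databricks.com:443/sql/1.0/warehouses/abc123?timezone=UTC
+	//
+	// reads the timezone value here while still forwarding "timezone" as a session parameter.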
+	if timezone, ok := params.getNoCase("timezone"); ok {
+		ucfg.Location, err = time.LoadLocation(timezone)
+	}
+
+	// any left over params are treated as session params
+	if len(params.Values) > 0 {
+		sessionParams := make(map[string]string)
+		for k := range params.Values {
+			sessionParams[k] = params.Get(k)
+		}
+		ucfg.SessionParams = sessionParams
+	}
+
+	return ucfg, err
+}
+
+// update the config with an authenticator based on the value from the parsed DSN
+func makeAuthenticator(parsedURL *url.URL, params *extractableParams, config *UserConfig) error {
+	name := parsedURL.User.Username()
+	// if the user name is set to 'token' we will interpret the password as an access token
+	if name == "token" {
+		pass, _ := parsedURL.User.Password()
+		return addPatAuthenticator(pass, config)
+	} else if name != "" {
+		// We currently don't support user name/password authentication
+		return dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrBasicAuthNotSupported, nil)
+	} else {
+		// Process parameters that specify the authentication type. They are removed from params
+		// Get the optional authentication type param
+		authTypeS, _ := params.extract("authType")
+		authType := auth.ParseAuthType(authTypeS)
+
+		// Get optional parameters for creating an authenticator
+		clientId, hasClientId := params.extract("clientId")
+		if !hasClientId {
+			clientId, hasClientId = params.extract("clientID")
+		}
+
+		clientSecret, hasClientSecret := params.extract("clientSecret")
+		accessToken, hasAccessToken := params.extract("accessToken")
+
+		switch authType {
+		case auth.AuthTypeUnknown:
+			// if no authentication type is specified create an authenticator based on which
+			// params have values
+			if hasAccessToken {
+				return addPatAuthenticator(accessToken, config)
+			}
+
+			if hasClientId || hasClientSecret {
+				return addOauthM2MAuthenticator(clientId, clientSecret, config)
+			}
+		case auth.AuthTypePat:
+			return addPatAuthenticator(accessToken, config)
+		case auth.AuthTypeOauthM2M:
+			return addOauthM2MAuthenticator(clientId, clientSecret, config)
+		case auth.AuthTypeOauthU2M:
+			return addOauthU2MAuthenticator(config)
+		}
+
+	}
+
+	return nil
+}
+
+func addPatAuthenticator(accessToken string, config *UserConfig) error {
+	if accessToken == "" {
+		return dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrInvalidDSNPATIsEmpty, nil)
+	}
+	config.AccessToken = accessToken
+	pat := &pat.PATAuth{
+		AccessToken: accessToken,
+	}
+	config.Authenticator = pat
+	return nil
+}
+
+func addOauthM2MAuthenticator(clientId, clientSecret string, config *UserConfig) error {
+	if clientId == "" || clientSecret == "" {
+		return dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.ErrInvalidDSNM2m, nil)
+	}
+
+	m2m := m2m.NewAuthenticator(clientId, clientSecret, config.Host)
+	config.Authenticator = m2m
+	return nil
+}
+
+func addOauthU2MAuthenticator(config *UserConfig) error {
+	u2m, err := u2m.NewAuthenticator(config.Host, 0)
+	if err == nil {
+		config.Authenticator = u2m
+	}
+	return err
+}
+
+type extractableParams struct {
+	url.Values
+}
+
+// returns the value corresponding to the key, if any, and a bool flag indicating if
+// there was a set value and it is not the empty string
+// deletes the key/value from params
+func (params *extractableParams) extract(key string) (string, bool) {
+	return extractParam(key, params, false, true)
+}
+
+func (params *extractableParams) extractAsInt(key string) (int, bool, error) {
+	if intString, ok := extractParam(key, params, false, true); ok {
+		i, err := strconv.Atoi(intString)
+		if err !=
nil { + return 0, true, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.InvalidDSNFormat(key, intString, "int"), err) + } + + return i, true, nil + } + + return 0, false, nil +} + +func (params *extractableParams) extractAsBool(key string) (bool, bool, error) { + if boolString, ok := extractParam(key, params, false, true); ok { + b, err := strconv.ParseBool(boolString) + if err != nil { + return false, true, dbsqlerrint.NewRequestError(context.TODO(), dbsqlerr.InvalidDSNFormat(key, boolString, "bool"), err) + } + + return b, true, nil + } + return false, false, nil +} + +// returns the value corresponding to the key using case insensitive key matching and a bool flag +// indicating if the value was set and is not the empty string +func (params *extractableParams) getNoCase(key string) (string, bool) { + return extractParam(key, params, true, false) +} + +func extractParam(key string, params *extractableParams, ignoreCase bool, delValue bool) (string, bool) { + if ignoreCase { + key = strings.ToLower(key) + } + + for k := range params.Values { + kc := k + if ignoreCase { + kc = strings.ToLower(k) + } + if kc == key { + val := params.Get(k) + if delValue { + params.Del(k) + } + return val, val != "" + } + } + + return "", false +} + +type ArrowConfig struct { + UseArrowBatches bool + UseArrowNativeDecimal bool + UseArrowNativeTimestamp bool + + // the following are currently not supported + UseArrowNativeComplexTypes bool + UseArrowNativeIntervalTypes bool +} + +func (ucfg ArrowConfig) WithDefaults() ArrowConfig { + ucfg.UseArrowBatches = true + ucfg.UseArrowNativeTimestamp = true + ucfg.UseArrowNativeComplexTypes = true + + return ucfg +} + +// DeepCopy returns a true deep copy of UserConfig +func (arrowConfig ArrowConfig) DeepCopy() ArrowConfig { + return ArrowConfig{ + UseArrowBatches: arrowConfig.UseArrowBatches, + UseArrowNativeDecimal: arrowConfig.UseArrowNativeDecimal, + UseArrowNativeTimestamp: arrowConfig.UseArrowNativeTimestamp, + UseArrowNativeComplexTypes: arrowConfig.UseArrowNativeComplexTypes, + UseArrowNativeIntervalTypes: arrowConfig.UseArrowNativeIntervalTypes, + } +} + +type CloudFetchConfig struct { + UseCloudFetch bool + MaxDownloadThreads int + MaxFilesInMemory int + MinTimeToExpiry time.Duration +} + +func (cfg CloudFetchConfig) WithDefaults() CloudFetchConfig { + cfg.UseCloudFetch = false + + if cfg.MaxDownloadThreads <= 0 { + cfg.MaxDownloadThreads = 10 + } + + if cfg.MaxFilesInMemory < 1 { + cfg.MaxFilesInMemory = 10 + } + + if cfg.MinTimeToExpiry < 0 { + cfg.MinTimeToExpiry = 0 * time.Second + } + + return cfg +} + +func (cfg CloudFetchConfig) DeepCopy() CloudFetchConfig { + return CloudFetchConfig{ + UseCloudFetch: cfg.UseCloudFetch, + MaxDownloadThreads: cfg.MaxDownloadThreads, + MaxFilesInMemory: cfg.MaxFilesInMemory, + MinTimeToExpiry: cfg.MinTimeToExpiry, + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/errors/err.go b/vendor/github.com/databricks/databricks-sql-go/internal/errors/err.go new file mode 100644 index 00000000..26a642e3 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/errors/err.go @@ -0,0 +1,274 @@ +package errors + +import ( + "context" + "database/sql/driver" + "fmt" + "strconv" + "time" + + "github.com/databricks/databricks-sql-go/driverctx" + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/pkg/errors" +) + +// value to use with errors.Is() to determine if an error +// chain contains a retryable 
error +var RetryableError error = errors.New("Retryable Error") + +// base databricks error +type databricksError struct { + err error + correlationId string + connectionId string + errType string + isRetryable bool + retryAfter time.Duration +} + +var _ error = (*databricksError)(nil) + +type stackTracer interface { + StackTrace() errors.StackTrace +} + +func newDatabricksError(ctx context.Context, msg string, err error) databricksError { + // create an error with the new message + if err == nil { + err = errors.New(msg) + } else { + err = errors.WithMessage(err, msg) + } + + // if the source error does not have a stack trace in its + // error chain add a stack trace + var st stackTracer + if ok := errors.As(err, &st); !ok { + err = errors.WithStack(err) + } + + // If the error chain contains an instance of retryableError + // set the flag and retryAfter value. + var retryable bool = false + var retryAfter time.Duration + if errors.Is(err, RetryableError) { + retryable = true + var re retryableError + if ok := errors.As(err, &re); ok { + retryAfter = re.RetryAfter() + } + } + + return databricksError{ + err: err, + correlationId: driverctx.CorrelationIdFromContext(ctx), + connectionId: driverctx.ConnIdFromContext(ctx), + errType: "unknown", + isRetryable: retryable, + retryAfter: retryAfter, + } +} + +func (e databricksError) Error() string { + return fmt.Sprintf("databricks: %s: %s", e.errType, e.err.Error()) +} + +func (e databricksError) Cause() error { + return e.err +} + +func (e databricksError) StackTrace() errors.StackTrace { + var st stackTracer + if ok := errors.As(e.err, &st); ok { + return st.StackTrace() + } + + return nil +} + +func (e databricksError) CorrelationId() string { + return e.correlationId +} + +func (e databricksError) ConnectionId() string { + return e.connectionId +} + +func (e databricksError) Is(err error) bool { + return err == dbsqlerr.DatabricksError +} + +func (e databricksError) IsRetryable() bool { + return e.isRetryable +} + +func (e databricksError) RetryAfter() time.Duration { + return e.retryAfter +} + +// driverError are issues with the driver or server, e.g. not supported operations, driver specific non-recoverable failures +type driverError struct { + databricksError +} + +var _ dbsqlerr.DBDriverError = (*driverError)(nil) + +func (e driverError) Is(err error) bool { + return err == dbsqlerr.DriverError || e.databricksError.Is(err) +} + +func (e driverError) Unwrap() error { + return e.err +} + +func NewDriverError(ctx context.Context, msg string, err error) *driverError { + dbErr := newDatabricksError(ctx, msg, err) + dbErr.errType = "driver error" + return &driverError{databricksError: dbErr} +} + +// requestError are errors caused by invalid requests, e.g. permission denied, warehouse not found +type requestError struct { + databricksError +} + +var _ dbsqlerr.DBRequestError = (*requestError)(nil) + +func (e requestError) Is(err error) bool { + return err == dbsqlerr.RequestError || e.databricksError.Is(err) +} + +func (e requestError) Unwrap() error { + return e.err +} + +func NewRequestError(ctx context.Context, msg string, err error) *requestError { + dbErr := newDatabricksError(ctx, msg, err) + dbErr.errType = "request error" + return &requestError{databricksError: dbErr} +} + +// executionError are errors occurring after the query has been submitted, e.g. invalid syntax, missing table, etc. 
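+// As a sketch of intended use (errors.Is works through the custom Is methods below):
+//
+//	if errors.Is(err, dbsqlerr.ExecutionError) {
+//		// the query reached the server but failed, e.g. invalid SQL
+//	}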
+type executionError struct { + databricksError + queryId string + sqlState string +} + +var _ dbsqlerr.DBExecutionError = (*executionError)(nil) + +func (e executionError) Is(err error) bool { + return err == dbsqlerr.ExecutionError || e.databricksError.Is(err) +} + +func (e executionError) Unwrap() error { + return e.err +} + +func (e executionError) QueryId() string { + return e.queryId +} + +func (e executionError) SqlState() string { + return e.sqlState +} + +func NewExecutionError(ctx context.Context, msg string, err error, opStatusResp *cli_service.TGetOperationStatusResp) *executionError { + dbErr := newDatabricksError(ctx, msg, err) + dbErr.errType = "execution error" + var sqlState string + if opStatusResp != nil { + sqlState = opStatusResp.GetSqlState() + } + + return &executionError{databricksError: dbErr, queryId: driverctx.QueryIdFromContext(ctx), sqlState: sqlState} +} + +// wraps an error and adds trace if not already present +func WrapErr(err error, msg string) error { + var st stackTracer + if ok := errors.As(err, &st); ok { + // wrap passed in error in a new error with the message + return errors.WithMessage(err, msg) + } + + // wrap passed in error in errors with the message and a stack trace + return errors.Wrap(err, msg) +} + +// adds a stack trace if not already present +func WrapErrf(err error, format string, args ...interface{}) error { + var st stackTracer + if ok := errors.As(err, &st); ok { + // wrap passed in error in a new error with the formatted message + return errors.WithMessagef(err, format, args...) + } + + // wrap passed in error in errors with the formatted message and a stack trace + return errors.Wrapf(err, format, args...) +} + +type retryableError struct { + err error + retryAfter time.Duration +} + +func (e retryableError) Is(err error) bool { + return err == RetryableError +} + +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) Error() string { + return fmt.Sprintf("databricks: retryableError: %s", e.err.Error()) +} + +func (e retryableError) RetryAfter() time.Duration { + return e.retryAfter +} + +func NewRetryableError(err error, retryAfterHdr string) error { + if err == nil { + err = errors.New("") + } + + var st stackTracer + if ok := errors.As(err, &st); !ok { + err = errors.WithStack(err) + } + + var retryAfter time.Duration + if nSeconds, err := strconv.ParseInt(retryAfterHdr, 10, 64); err == nil { + retryAfter = time.Second * time.Duration(nSeconds) + } + + return retryableError{err: err, retryAfter: retryAfter} +} + +// badConnection error identifies as driver.ErrBadConn +// When added to the error stack the sql package will initiate retry behaviour with a new connection. 
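+// For example (sketch), errors.Is(NewBadConnectionError(err), driver.ErrBadConn)
+// reports true, which is the signal database/sql looks for before retrying the
+// operation on a fresh connection.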
+type badConnectionError struct {
+	err error
+}
+
+func (e badConnectionError) Is(err error) bool {
+	return err == driver.ErrBadConn
+}
+
+func (e badConnectionError) Unwrap() error {
+	return e.err
+}
+
+func (e badConnectionError) Error() string {
+	if e.err != nil {
+		return e.err.Error()
+	}
+	return "bad connection error"
+}
+
+func NewBadConnectionError(err error) error {
+	return badConnectionError{err: err}
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRecordIterator.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRecordIterator.go
new file mode 100644
index 00000000..583cbd04
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRecordIterator.go
@@ -0,0 +1,172 @@
+package arrowbased
+
+import (
+	"context"
+	"io"
+
+	"github.com/apache/arrow/go/v12/arrow"
+	"github.com/databricks/databricks-sql-go/internal/cli_service"
+	"github.com/databricks/databricks-sql-go/internal/config"
+	dbsqlerr "github.com/databricks/databricks-sql-go/internal/errors"
+	"github.com/databricks/databricks-sql-go/internal/rows/rowscanner"
+	"github.com/databricks/databricks-sql-go/rows"
+)
+
+func NewArrowRecordIterator(ctx context.Context, rpi rowscanner.ResultPageIterator, bi BatchIterator, arrowSchemaBytes []byte, cfg config.Config) rows.ArrowBatchIterator {
+	ari := arrowRecordIterator{
+		cfg: cfg,
+		batchIterator: bi,
+		resultPageIterator: rpi,
+		ctx: ctx,
+		arrowSchemaBytes: arrowSchemaBytes,
+	}
+
+	return &ari
+
+}
+
+// A type implementing DBSQLArrowBatchIterator
+type arrowRecordIterator struct {
+	ctx context.Context
+	cfg config.Config
+	batchIterator BatchIterator
+	resultPageIterator rowscanner.ResultPageIterator
+	currentBatch SparkArrowBatch
+	isFinished bool
+	arrowSchemaBytes []byte
+}
+
+var _ rows.ArrowBatchIterator = (*arrowRecordIterator)(nil)
+
+// Retrieve the next arrow record
+func (ri *arrowRecordIterator) Next() (arrow.Record, error) {
+	if !ri.HasNext() {
+		// returning EOF indicates that there are no more records to iterate
+		return nil, io.EOF
+	}
+
+	// make sure we have the current batch
+	err := ri.getCurrentBatch()
+	if err != nil {
+		return nil, err
+	}
+
+	// return next record in current batch
+	r, err := ri.currentBatch.Next()
+
+	ri.checkFinished()
+
+	return r, err
+}
+
+// Indicate whether there are any more records available
+func (ri *arrowRecordIterator) HasNext() bool {
+	ri.checkFinished()
+	return !ri.isFinished
+}
+
+// Free any resources associated with this iterator
+func (ri *arrowRecordIterator) Close() {
+	if !ri.isFinished {
+		ri.isFinished = true
+		if ri.currentBatch != nil {
+			ri.currentBatch.Close()
+		}
+
+		if ri.batchIterator != nil {
+			ri.batchIterator.Close()
+		}
+
+		if ri.resultPageIterator != nil {
+			ri.resultPageIterator.Close()
+		}
+	}
+}
+
+func (ri *arrowRecordIterator) checkFinished() {
+	finished := ri.isFinished ||
+		((ri.currentBatch == nil || !ri.currentBatch.HasNext()) &&
+			(ri.batchIterator == nil || !ri.batchIterator.HasNext()) &&
+			(ri.resultPageIterator == nil || !ri.resultPageIterator.HasNext()))
+
+	if finished {
+		// Reached end of result set so Close
+		ri.Close()
+	}
+}
+
+// Update the current batch if necessary
+func (ri *arrowRecordIterator) getCurrentBatch() error {
+
+	// only need to update if no current batch or current batch has no more records
+	if ri.currentBatch == nil || !ri.currentBatch.HasNext() {
+
+		// ensure up to date batch iterator
+		err := ri.getBatchIterator()
+		if err != nil {
return err + } + + // release current batch + if ri.currentBatch != nil { + ri.currentBatch.Close() + } + + // Get next batch from batch iterator + ri.currentBatch, err = ri.batchIterator.Next() + if err != nil { + return err + } + } + + return nil +} + +// Update batch iterator if necessary +func (ri *arrowRecordIterator) getBatchIterator() error { + // only need to update if there is no batch iterator or the + // batch iterator has no more batches + if ri.batchIterator == nil || !ri.batchIterator.HasNext() { + if ri.batchIterator != nil { + // release any resources held by the current batch iterator + ri.batchIterator.Close() + ri.batchIterator = nil + } + + // Get the next page of the result set + resp, err := ri.resultPageIterator.Next() + if err != nil { + return err + } + + // Check the result format + resultFormat := resp.ResultSetMetadata.GetResultFormat() + if resultFormat != cli_service.TSparkRowSetType_ARROW_BASED_SET && resultFormat != cli_service.TSparkRowSetType_URL_BASED_SET { + return dbsqlerr.NewDriverError(ri.ctx, errArrowRowsNotArrowFormat, nil) + } + + if ri.arrowSchemaBytes == nil { + ri.arrowSchemaBytes = resp.ResultSetMetadata.ArrowSchema + } + + // Create a new batch iterator for the batches in the result page + bi, err := ri.newBatchIterator(resp) + if err != nil { + return err + } + + ri.batchIterator = bi + } + + return nil +} + +// Create a new batch iterator from a page of the result set +func (ri *arrowRecordIterator) newBatchIterator(fr *cli_service.TFetchResultsResp) (BatchIterator, error) { + rowSet := fr.Results + if len(rowSet.ResultLinks) > 0 { + return NewCloudBatchIterator(ri.ctx, rowSet.ResultLinks, rowSet.StartRowOffset, &ri.cfg) + } else { + return NewLocalBatchIterator(ri.ctx, rowSet.ArrowBatches, rowSet.StartRowOffset, ri.arrowSchemaBytes, &ri.cfg) + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRows.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRows.go new file mode 100644 index 00000000..f6a60c58 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/arrowRows.go @@ -0,0 +1,736 @@ +package arrowbased + +import ( + "bytes" + "context" + "database/sql/driver" + "io" + "time" + + "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v12/arrow/ipc" + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/config" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + "github.com/databricks/databricks-sql-go/internal/rows/rowscanner" + dbsqllog "github.com/databricks/databricks-sql-go/logger" + dbsqlrows "github.com/databricks/databricks-sql-go/rows" + "github.com/pkg/errors" +) + +// Abstraction for a set of arrow records +type SparkArrowBatch interface { + rowscanner.Delimiter + Next() (SparkArrowRecord, error) + HasNext() bool + Close() +} + +// Abstraction for an arrow record +type SparkArrowRecord interface { + rowscanner.Delimiter + arrow.Record +} + +type timeStampFn func(arrow.Timestamp) time.Time + +type colInfo struct { + name string + arrowType arrow.DataType + dbType cli_service.TTypeId +} + +// arrowRowScanner handles extracting values from arrow records +type arrowRowScanner struct { + rowscanner.Delimiter + valueContainerMaker + + // configuration of different arrow options for retrieving results + config.ArrowConfig + 
+ // arrow schema corresponding to the TTableSchema + arrowSchema *arrow.Schema + + // serialized form of arrow format schema + arrowSchemaBytes []byte + + // database types for the columns + colInfo []colInfo + + currentBatch SparkArrowBatch + + // Currently loaded field values for a set of rows + rowValues RowValues + + // function to convert arrow timestamp when using native arrow format + toTimestampFn timeStampFn + + // hold on to a logger instance with context, rather than just use the global variable + *dbsqllog.DBSQLLogger + + location *time.Location + + ctx context.Context + + batchIterator BatchIterator +} + +// Make sure arrowRowScanner fulfills the RowScanner interface +var _ rowscanner.RowScanner = (*arrowRowScanner)(nil) + +// NewArrowRowScanner returns an instance of RowScanner which handles arrow format results +func NewArrowRowScanner(resultSetMetadata *cli_service.TGetResultSetMetadataResp, rowSet *cli_service.TRowSet, cfg *config.Config, logger *dbsqllog.DBSQLLogger, ctx context.Context) (rowscanner.RowScanner, dbsqlerr.DBError) { + + // we take a passed in logger, rather than just using the global from dbsqllog, so that the containing rows + // instance can pass in a logger with context such as correlation ID and operation ID + if logger == nil { + logger = dbsqllog.Logger + } + + logger.Debug().Msgf("databricks: creating arrow row scanner, nArrowBatches: %d", len(rowSet.ArrowBatches)) + + var arrowConfig config.ArrowConfig + if cfg != nil { + arrowConfig = cfg.ArrowConfig + } + + schemaBytes, arrowSchema, metadataErr := tGetResultSetMetadataRespToArrowSchema(resultSetMetadata, arrowConfig, ctx, logger) + if metadataErr != nil { + return nil, metadataErr + } + + // Create column info + colInfos := getColumnInfo(arrowSchema, resultSetMetadata.Schema) + + // get the function for converting arrow timestamps to a time.Time + // time values from the server are returned as UTC with microsecond precision + ttsf, err := arrow.FixedWidthTypes.Timestamp_us.(*arrow.TimestampType).GetToTimeFunc() + if err != nil { + logger.Err(err).Msg(errArrowRowsToTimestampFn) + return nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsToTimestampFn, err) + } + + var bi BatchIterator + var err2 dbsqlerr.DBError + if len(rowSet.ResultLinks) > 0 { + logger.Debug().Msgf("Initialize CloudFetch loader, row set start offset: %d, file list:", rowSet.StartRowOffset) + for _, resultLink := range rowSet.ResultLinks { + logger.Debug().Msgf("- start row offset: %d, row count: %d", resultLink.StartRowOffset, resultLink.RowCount) + } + bi, err2 = NewCloudBatchIterator(context.Background(), rowSet.ResultLinks, rowSet.StartRowOffset, cfg) + } else { + bi, err2 = NewLocalBatchIterator(context.Background(), rowSet.ArrowBatches, rowSet.StartRowOffset, schemaBytes, cfg) + } + if err2 != nil { + return nil, err2 + } + + var location *time.Location = time.UTC + if cfg != nil { + if cfg.Location != nil { + location = cfg.Location + } + } + + rs := &arrowRowScanner{ + Delimiter: rowscanner.NewDelimiter(rowSet.StartRowOffset, rowscanner.CountRows(rowSet)), + valueContainerMaker: &arrowValueContainerMaker{}, + ArrowConfig: arrowConfig, + arrowSchemaBytes: schemaBytes, + arrowSchema: arrowSchema, + toTimestampFn: ttsf, + colInfo: colInfos, + DBSQLLogger: logger, + location: location, + batchIterator: bi, + } + + return rs, nil +} + +// Close is called when the Rows instance is closed. 
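+// It releases, in order, the loaded column values, the batch iterator, and any
+// currently held arrow batch (see the body below).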
+func (ars *arrowRowScanner) Close() {
+	if ars.rowValues != nil {
+		ars.rowValues.Close()
+	}
+
+	if ars.batchIterator != nil {
+		ars.batchIterator.Close()
+	}
+
+	if ars.currentBatch != nil {
+		ars.currentBatch.Close()
+	}
+}
+
+// NRows returns the number of rows in the current set of batches
+func (ars *arrowRowScanner) NRows() int64 {
+	if ars != nil {
+		return ars.Count()
+	}
+
+	return 0
+}
+
+var complexTypes map[cli_service.TTypeId]struct{} = map[cli_service.TTypeId]struct{}{
+	cli_service.TTypeId_ARRAY_TYPE:  {},
+	cli_service.TTypeId_MAP_TYPE:    {},
+	cli_service.TTypeId_STRUCT_TYPE: {}}
+
+var intervalTypes map[cli_service.TTypeId]struct{} = map[cli_service.TTypeId]struct{}{
+	cli_service.TTypeId_INTERVAL_DAY_TIME_TYPE:   {},
+	cli_service.TTypeId_INTERVAL_YEAR_MONTH_TYPE: {}}
+
+// ScanRow is called to populate the provided slice with the
+// content of the current row. The provided slice will be the same
+// size as the number of columns.
+// The dest should not be written to outside of ScanRow. Care
+// should be taken when closing a RowScanner not to modify
+// a buffer held in dest.
+func (ars *arrowRowScanner) ScanRow(
+	destination []driver.Value,
+	rowNumber int64) dbsqlerr.DBError {
+
+	// load the arrow batch for the specified row, if necessary
+	err := ars.loadBatchFor(rowNumber)
+	if err != nil {
+		return err
+	}
+
+	// if no location is provided default to UTC
+	if ars.location == nil {
+		ars.location = time.UTC
+	}
+
+	// loop over the destination slice filling in values
+	for i := range destination {
+		// clear the destination
+		destination[i] = nil
+
+		// if there is a corresponding column and the value for the specified row
+		// is not null we put the value in the destination
+		if !ars.rowValues.IsNull(i, rowNumber) {
+
+			col := ars.colInfo[i]
+			dbType := col.dbType
+
+			if (dbType == cli_service.TTypeId_DECIMAL_TYPE && ars.UseArrowNativeDecimal) ||
+				(isIntervalType(dbType) && ars.UseArrowNativeIntervalTypes) {
+				// not yet fully supported
+				ars.Error().Msgf(errArrowRowsUnsupportedNativeType(dbType.String()))
+				return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsUnsupportedNativeType(dbType.String()), nil)
+			}
+
+			// get the value from the column values holder
+			var err1 error
+			destination[i], err1 = ars.rowValues.Value(i, rowNumber)
+			if err1 != nil {
+				err = dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsColumnValue(col.name), err1)
+			}
+		}
+	}
+
+	return err
+}
+
+func isIntervalType(typeId cli_service.TTypeId) bool {
+	_, ok := intervalTypes[typeId]
+	return ok
+}
+
+// loadBatchFor loads the batch containing the specified row if necessary
+func (ars *arrowRowScanner) loadBatchFor(rowNumber int64) dbsqlerr.DBError {
+
+	if ars == nil {
+		return dbsqlerrint.NewDriverError(context.Background(), errArrowRowsNoArrowBatches, nil)
+	}
+
+	if ars.batchIterator == nil {
+		return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsNoArrowBatches, nil)
+	}
+
+	// if the batch is already loaded we can just return
+	if ars.rowValues != nil && ars.rowValues.Contains(rowNumber) {
+		return nil
+	}
+
+	// check for things like going backwards, rowNumber < 0, etc.
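+	// Illustration: after the batch covering rows [5, 10) has been loaded,
+	// re-reading row 7 is served from rowValues above, but a request for row 3
+	// fails validation because earlier batches have already been discarded.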
+	err := ars.validateRowNumber(rowNumber)
+	if err != nil {
+		return err
+	}
+
+	// Find the batch containing the row number, if necessary
+	for ars.currentBatch == nil || !ars.currentBatch.Contains(rowNumber) {
+		batch, err := ars.batchIterator.Next()
+		if err != nil {
+			if dbErr, ok := err.(dbsqlerr.DBError); ok {
+				return dbErr
+			} else {
+				return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsInvalidRowNumber(rowNumber), err)
+			}
+		}
+
+		ars.currentBatch = batch
+	}
+
+	// Get the next arrow record from the current batch
+	sar, err2 := ars.currentBatch.Next()
+	if err2 != nil {
+		ars.Err(err2).Msg(errArrowRowsUnableToReadBatch)
+		return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsUnableToReadBatch, err2)
+	}
+
+	defer sar.Release()
+
+	// set up the column values containers
+	if ars.rowValues == nil {
+		err := ars.makeColumnValuesContainers(ars, rowscanner.NewDelimiter(sar.Start(), sar.Count()))
+		if err != nil {
+			return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsMakeColumnValueContainers, err)
+		}
+	}
+
+	// for each column we want to create an arrow array specific to the data type
+	for i, col := range sar.Columns() {
+		func() {
+			col.Retain()
+			defer col.Release()
+
+			colData := col.Data()
+			colData.Retain()
+			defer colData.Release()
+
+			err := ars.rowValues.SetColumnValues(i, colData)
+			if err != nil {
+				ars.Error().Msg(err.Error())
+			}
+		}()
+	}
+
+	// Update the delimiter in rowValues to reflect the currently loaded set of rows
+	ars.rowValues.SetDelimiter(rowscanner.NewDelimiter(sar.Start(), sar.Count()))
+	return nil
+}
+
+// Check that the row number falls within the range of this row scanner and that
+// it is not moving backwards.
+func (ars *arrowRowScanner) validateRowNumber(rowNumber int64) dbsqlerr.DBError {
+	if rowNumber < 0 || rowNumber > ars.End() || (ars.currentBatch != nil && ars.currentBatch.Direction(rowNumber) == rowscanner.DirBack) {
+		return dbsqlerrint.NewDriverError(ars.ctx, errArrowRowsInvalidRowNumber(rowNumber), nil)
+	}
+	return nil
+}
+
+func (ars *arrowRowScanner) GetArrowBatches(ctx context.Context, cfg config.Config, rpi rowscanner.ResultPageIterator) (dbsqlrows.ArrowBatchIterator, error) {
+	ri := NewArrowRecordIterator(ctx, rpi, ars.batchIterator, ars.arrowSchemaBytes, cfg)
+	return ri, nil
+}
+
+// getArrowSchemaBytes returns the serialized schema in ipc format
+func getArrowSchemaBytes(schema *arrow.Schema, ctx context.Context) ([]byte, dbsqlerr.DBError) {
+	if schema == nil {
+		return nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsNilArrowSchema, nil)
+	}
+
+	var output bytes.Buffer
+	w := ipc.NewWriter(&output, ipc.WithSchema(schema))
+	err := w.Close()
+	if err != nil {
+		return nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsUnableToWriteArrowSchema, err)
+	}
+
+	arrowSchemaBytes := output.Bytes()
+
+	// the writer serializes to an arrow batch but we just want the
+	// schema bytes so we strip off the empty Record at the end
+	arrowSchemaBytes = arrowSchemaBytes[:len(arrowSchemaBytes)-8]
+
+	return arrowSchemaBytes, nil
+}
+
+// tTableSchemaToArrowSchema converts the TTableSchema returned by the thrift server into an arrow.Schema instance
+func tTableSchemaToArrowSchema(schema *cli_service.TTableSchema, arrowConfig *config.ArrowConfig) (*arrow.Schema, error) {
+	columns := schema.GetColumns()
+	fields := make([]arrow.Field, len(columns))
+
+	for i := range columns {
+		field, err := tColumnDescToArrowField(columns[i], arrowConfig)
+		if err != nil {
+			return nil, err
+		}
+
+		fields[i] = field
+	}
+
+	arrowSchema := arrow.NewSchema(fields, nil)
+
+	return arrowSchema, nil
+}
+
+// map the thrift data types to the corresponding arrow data type
+var toArrowTypeMap map[cli_service.TTypeId]arrow.DataType = map[cli_service.TTypeId]arrow.DataType{
+	cli_service.TTypeId_BOOLEAN_TYPE:  arrow.FixedWidthTypes.Boolean,
+	cli_service.TTypeId_TINYINT_TYPE:  arrow.PrimitiveTypes.Int8,
+	cli_service.TTypeId_SMALLINT_TYPE: arrow.PrimitiveTypes.Int16,
+	cli_service.TTypeId_INT_TYPE:      arrow.PrimitiveTypes.Int32,
+	cli_service.TTypeId_BIGINT_TYPE:   arrow.PrimitiveTypes.Int64,
+	cli_service.TTypeId_FLOAT_TYPE:    arrow.PrimitiveTypes.Float32,
+	cli_service.TTypeId_DOUBLE_TYPE:   arrow.PrimitiveTypes.Float64,
+	cli_service.TTypeId_STRING_TYPE:   arrow.BinaryTypes.String,
+	// cli_service.TTypeId_TIMESTAMP_TYPE: see tColumnDescToArrowDataType
+	cli_service.TTypeId_BINARY_TYPE: arrow.BinaryTypes.Binary,
+	// cli_service.TTypeId_ARRAY_TYPE:  see tColumnDescToArrowDataType
+	// cli_service.TTypeId_MAP_TYPE:    see tColumnDescToArrowDataType
+	// cli_service.TTypeId_STRUCT_TYPE: see tColumnDescToArrowDataType
+	cli_service.TTypeId_UNION_TYPE:        arrow.BinaryTypes.String,
+	cli_service.TTypeId_USER_DEFINED_TYPE: arrow.BinaryTypes.String,
+	// cli_service.TTypeId_DECIMAL_TYPE: see tColumnDescToArrowDataType
+	cli_service.TTypeId_NULL_TYPE:    arrow.Null,
+	cli_service.TTypeId_DATE_TYPE:    arrow.FixedWidthTypes.Date32,
+	cli_service.TTypeId_VARCHAR_TYPE: arrow.BinaryTypes.String,
+	cli_service.TTypeId_CHAR_TYPE:    arrow.BinaryTypes.String,
+	// cli_service.TTypeId_INTERVAL_YEAR_MONTH_TYPE: see tColumnDescToArrowDataType
+	// cli_service.TTypeId_INTERVAL_DAY_TIME_TYPE:   see tColumnDescToArrowDataType
+}
+
+func tColumnDescToArrowDataType(tColumnDesc *cli_service.TColumnDesc, arrowConfig *config.ArrowConfig) (arrow.DataType, error) {
+	// get the thrift type id
+	tType := rowscanner.GetDBTypeID(tColumnDesc)
+
+	if at, ok := toArrowTypeMap[tType]; ok {
+		// simple type mapping
+		return at, nil
+	} else {
+		// for some types there isn't a simple 1:1 correspondence to an arrow data type
+		if tType == cli_service.TTypeId_DECIMAL_TYPE {
+			// if not using arrow native decimal type decimals are returned as strings
+			if !arrowConfig.UseArrowNativeDecimal {
+				return arrow.BinaryTypes.String, nil
+			}
+
+			// Need to construct an instance of arrow DecimalType with the
+			// correct scale and precision
+			scale, precision, err := getDecimalScalePrecision(tColumnDesc)
+			if err != nil {
+				return nil, err
+			}
+
+			decimalType, err := arrow.NewDecimalType(arrow.DECIMAL128, precision, scale)
+			if err != nil {
+				return nil, dbsqlerrint.WrapErr(err, errArrowRowsUnableToCreateDecimalType(scale, precision))
+			}
+
+			return decimalType, nil
+
+		} else if tType == cli_service.TTypeId_TIMESTAMP_TYPE {
+			// if not using arrow native timestamps thrift server returns strings
+			if !arrowConfig.UseArrowNativeTimestamp {
+				return arrow.BinaryTypes.String, nil
+			}
+
+			// timestamp is UTC with microsecond precision
+			return arrow.FixedWidthTypes.Timestamp_us, nil
+		} else if _, ok := complexTypes[tType]; ok {
+			// if not using arrow native complex types thrift server returns strings
+			if !arrowConfig.UseArrowNativeComplexTypes {
+				return arrow.BinaryTypes.String, nil
+			}
+
+			return nil, errors.New(errArrowRowsUnsupportedWithHiveSchema(rowscanner.GetDBTypeName(tColumnDesc)))
+		} else if _, ok := intervalTypes[tType]; ok {
+			// if not using arrow native interval types thrift server returns strings
+			if !arrowConfig.UseArrowNativeIntervalTypes {
+				return arrow.BinaryTypes.String, nil
+			}
+
return nil, errors.New(errArrowRowsUnsupportedWithHiveSchema(rowscanner.GetDBTypeName(tColumnDesc))) + } else { + return nil, errors.New(errArrowRowsUnknownDBType) + } + } + +} + +func getDecimalScalePrecision(tColumnDesc *cli_service.TColumnDesc) (scale, precision int32, err error) { + fail := errors.New(errArrowRowsInvalidDecimalType) + + typeQualifiers := rowscanner.GetDBTypeQualifiers(tColumnDesc) + if typeQualifiers == nil || typeQualifiers.Qualifiers == nil { + err = fail + return + } + + scaleHolder, ok := typeQualifiers.Qualifiers["scale"] + if !ok || scaleHolder == nil || scaleHolder.I32Value == nil { + err = fail + return + } else { + scale = *scaleHolder.I32Value + } + + precisionHolder, ok := typeQualifiers.Qualifiers["precision"] + if !ok || precisionHolder == nil || precisionHolder.I32Value == nil { + err = fail + return + } else { + precision = *precisionHolder.I32Value + } + + return +} + +func tColumnDescToArrowField(columnDesc *cli_service.TColumnDesc, arrowConfig *config.ArrowConfig) (arrow.Field, error) { + arrowDataType, err := tColumnDescToArrowDataType(columnDesc, arrowConfig) + if err != nil { + return arrow.Field{}, err + } + + arrowField := arrow.Field{ + Name: columnDesc.ColumnName, + Type: arrowDataType, + } + + return arrowField, nil +} + +// Build a slice of columnInfo using the arrow schema and the thrift schema +func getColumnInfo(arrowSchema *arrow.Schema, schema *cli_service.TTableSchema) []colInfo { + if arrowSchema == nil || schema == nil { + return []colInfo{} + } + + nFields := len(arrowSchema.Fields()) + if len(schema.Columns) < nFields { + nFields = len(schema.Columns) + } + + colInfos := make([]colInfo, nFields) + for i := 0; i < nFields; i++ { + col := schema.Columns[i] + field := arrowSchema.Field(i) + colInfos[i] = colInfo{name: field.Name, arrowType: field.Type, dbType: rowscanner.GetDBType(col)} + } + + return colInfos +} + +// Derive an arrow.Schema object and the corresponding serialized bytes from TGetResultSetMetadataResp +func tGetResultSetMetadataRespToArrowSchema(resultSetMetadata *cli_service.TGetResultSetMetadataResp, arrowConfig config.ArrowConfig, ctx context.Context, logger *dbsqllog.DBSQLLogger) ([]byte, *arrow.Schema, dbsqlerr.DBError) { + + var arrowSchema *arrow.Schema + schemaBytes := resultSetMetadata.ArrowSchema + if schemaBytes == nil { + var err error + // convert the TTableSchema to an arrow Schema + arrowSchema, err = tTableSchemaToArrowSchema(resultSetMetadata.Schema, &arrowConfig) + if err != nil { + logger.Err(err).Msg(errArrowRowsConvertSchema) + return nil, nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsConvertSchema, err) + } + + // serialize the arrow schema + schemaBytes, err = getArrowSchemaBytes(arrowSchema, ctx) + if err != nil { + logger.Err(err).Msg(errArrowRowsSerializeSchema) + return nil, nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsSerializeSchema, err) + } + } else { + br := bytes.NewReader(schemaBytes) + rdr, err := ipc.NewReader(br) + if err != nil { + return nil, nil, dbsqlerrint.NewDriverError(ctx, errArrowRowsUnableToReadBatch, err) + } + defer rdr.Release() + + arrowSchema = rdr.Schema() + } + + return schemaBytes, arrowSchema, nil +} + +type arrowValueContainerMaker struct{} + +var _ valueContainerMaker = (*arrowValueContainerMaker)(nil) + +// makeColumnValuesContainers creates appropriately typed column values holders for each column +func (vcm *arrowValueContainerMaker) makeColumnValuesContainers(ars *arrowRowScanner, d rowscanner.Delimiter) error { + if ars.rowValues == nil { + 
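+		// Containers are created lazily, on the first arrow record seen: one
+		// typed holder per column, chosen from the arrow field type below.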
columnValueHolders := make([]columnValues, len(ars.colInfo))
+		for i, field := range ars.arrowSchema.Fields() {
+			holder, err := vcm.makeColumnValueContainer(field.Type, ars.location, ars.toTimestampFn, &ars.colInfo[i])
+			if err != nil {
+				ars.Error().Msg(err.Error())
+				return err
+			}
+
+			columnValueHolders[i] = holder
+		}
+
+		ars.rowValues = NewRowValues(d, columnValueHolders)
+	}
+
+	return nil
+}
+
+func (vcm *arrowValueContainerMaker) makeColumnValueContainer(t arrow.DataType, location *time.Location, toTimestampFn timeStampFn, colInfo *colInfo) (columnValues, error) {
+	if location == nil {
+		location = time.UTC
+	}
+
+	switch t := t.(type) {
+
+	case *arrow.BooleanType:
+		return &columnValuesTyped[*array.Boolean, bool]{}, nil
+
+	case *arrow.Int8Type:
+		return &columnValuesTyped[*array.Int8, int8]{}, nil
+
+	case *arrow.Int16Type:
+		return &columnValuesTyped[*array.Int16, int16]{}, nil
+
+	case *arrow.Int32Type:
+		return &columnValuesTyped[*array.Int32, int32]{}, nil
+
+	case *arrow.Int64Type:
+		return &columnValuesTyped[*array.Int64, int64]{}, nil
+
+	case *arrow.Float32Type:
+		return &columnValuesTyped[*array.Float32, float32]{}, nil
+
+	case *arrow.Float64Type:
+		return &columnValuesTyped[*array.Float64, float64]{}, nil
+
+	case *arrow.StringType:
+		if colInfo != nil && colInfo.dbType == cli_service.TTypeId_TIMESTAMP_TYPE {
+			return &timestampStringValueContainer{location: location, fieldName: colInfo.name}, nil
+		} else {
+			return &columnValuesTyped[*array.String, string]{}, nil
+		}
+
+	case *arrow.Decimal128Type:
+		return &decimal128Container{scale: t.Scale}, nil
+
+	case *arrow.Date32Type:
+		return &dateValueContainer{location: location}, nil
+
+	case *arrow.TimestampType:
+		return &timestampValueContainer{location: location, toTimestampFn: toTimestampFn}, nil
+
+	case *arrow.BinaryType:
+		return &columnValuesTyped[*array.Binary, []byte]{}, nil
+
+	case *arrow.ListType:
+		lvc := &listValueContainer{listArrayType: t}
+		var err error
+		lvc.values, err = vcm.makeColumnValueContainer(t.Elem(), location, toTimestampFn, nil)
+		if err != nil {
+			return nil, err
+		}
+		switch t.Elem().(type) {
+		case *arrow.MapType, *arrow.ListType, *arrow.StructType:
+			lvc.complexValue = true
+		}
+		return lvc, nil
+
+	case *arrow.MapType:
+		mvc := &mapValueContainer{mapArrayType: t}
+		var err error
+		mvc.values, err = vcm.makeColumnValueContainer(t.ItemType(), location, toTimestampFn, nil)
+		if err != nil {
+			return nil, err
+		}
+		mvc.keys, err = vcm.makeColumnValueContainer(t.KeyType(), location, toTimestampFn, nil)
+		if err != nil {
+			return nil, err
+		}
+		switch t.ItemType().(type) {
+		case *arrow.MapType, *arrow.ListType, *arrow.StructType:
+			mvc.complexValue = true
+		}
+
+		return mvc, nil
+
+	case *arrow.StructType:
+		svc := &structValueContainer{structArrayType: t}
+		svc.fieldNames = make([]string, len(t.Fields()))
+		svc.fieldValues = make([]columnValues, len(t.Fields()))
+		svc.complexValue = make([]bool, len(t.Fields()))
+		for i, f := range t.Fields() {
+			svc.fieldNames[i] = f.Name
+			c, err := vcm.makeColumnValueContainer(f.Type, location, toTimestampFn, nil)
+			if err != nil {
+				return nil, err
+			}
+			svc.fieldValues[i] = c
+			switch f.Type.(type) {
+			case *arrow.MapType, *arrow.ListType, *arrow.StructType:
+				svc.complexValue[i] = true
+			}
+
+		}
+
+		return svc, nil
+
+	case *arrow.NullType:
+		return nullContainer, nil
+
+	default:
+		return nil, errors.Errorf(errArrowRowsUnhandledArrowType(t.String()))
+	}
+}
+
+// Container for a set of arrow records
+type sparkArrowBatch struct {
+	// Delimiter indicating
the range of rows covered by the arrow records + rowscanner.Delimiter + arrowRecords []SparkArrowRecord +} + +var _ SparkArrowBatch = (*sparkArrowBatch)(nil) + +func (b *sparkArrowBatch) Next() (SparkArrowRecord, error) { + if len(b.arrowRecords) > 0 { + r := b.arrowRecords[0] + // remove the record from the slice as iteration is only forwards + b.arrowRecords = b.arrowRecords[1:] + return r, nil + } + + // no more records + return nil, io.EOF +} + +func (b *sparkArrowBatch) HasNext() bool { return b != nil && len(b.arrowRecords) > 0 } + +func (b *sparkArrowBatch) Close() { + // Release any arrow records + for i := range b.arrowRecords { + b.arrowRecords[i].Release() + } + b.arrowRecords = nil +} + +// Composite of an arrow record and a delimiter indicating +// the rows corresponding to the record. +type sparkArrowRecord struct { + rowscanner.Delimiter + arrow.Record +} + +var _ SparkArrowRecord = (*sparkArrowRecord)(nil) + +func (sar *sparkArrowRecord) Release() { + if sar.Record != nil { + sar.Record.Release() + sar.Record = nil + } +} + +func (sar *sparkArrowRecord) Retain() { + if sar.Record != nil { + sar.Record.Retain() + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/batchloader.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/batchloader.go new file mode 100644 index 00000000..45b067dd --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/batchloader.go @@ -0,0 +1,337 @@ +package arrowbased + +import ( + "bytes" + "context" + "fmt" + "io" + "time" + + "github.com/databricks/databricks-sql-go/internal/config" + "github.com/databricks/databricks-sql-go/internal/rows/rowscanner" + "github.com/pierrec/lz4/v4" + "github.com/pkg/errors" + + "net/http" + + "github.com/apache/arrow/go/v12/arrow/ipc" + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + "github.com/databricks/databricks-sql-go/logger" +) + +type BatchIterator interface { + Next() (SparkArrowBatch, error) + HasNext() bool + Close() +} + +func NewCloudBatchIterator( + ctx context.Context, + files []*cli_service.TSparkArrowResultLink, + startRowOffset int64, + cfg *config.Config, +) (BatchIterator, dbsqlerr.DBError) { + bi := &cloudBatchIterator{ + ctx: ctx, + cfg: cfg, + startRowOffset: startRowOffset, + pendingLinks: NewQueue[cli_service.TSparkArrowResultLink](), + downloadTasks: NewQueue[cloudFetchDownloadTask](), + } + + for _, link := range files { + bi.pendingLinks.Enqueue(link) + } + + return bi, nil +} + +func NewLocalBatchIterator( + ctx context.Context, + batches []*cli_service.TSparkArrowBatch, + startRowOffset int64, + arrowSchemaBytes []byte, + cfg *config.Config, +) (BatchIterator, dbsqlerr.DBError) { + bi := &localBatchIterator{ + cfg: cfg, + startRowOffset: startRowOffset, + arrowSchemaBytes: arrowSchemaBytes, + batches: batches, + index: -1, + } + + return bi, nil +} + +type localBatchIterator struct { + cfg *config.Config + startRowOffset int64 + arrowSchemaBytes []byte + batches []*cli_service.TSparkArrowBatch + index int +} + +var _ BatchIterator = (*localBatchIterator)(nil) + +func (bi *localBatchIterator) Next() (SparkArrowBatch, error) { + cnt := len(bi.batches) + bi.index++ + if bi.index < cnt { + ab := bi.batches[bi.index] + + reader := io.MultiReader( + bytes.NewReader(bi.arrowSchemaBytes), + getReader(bytes.NewReader(ab.Batch), 
bi.cfg.UseLz4Compression), + ) + + records, err := getArrowRecords(reader, bi.startRowOffset) + if err != nil { + return &sparkArrowBatch{}, err + } + + batch := sparkArrowBatch{ + Delimiter: rowscanner.NewDelimiter(bi.startRowOffset, ab.RowCount), + arrowRecords: records, + } + + bi.startRowOffset += ab.RowCount // advance to beginning of the next batch + + return &batch, nil + } + + bi.index = cnt + return nil, io.EOF +} + +func (bi *localBatchIterator) HasNext() bool { + // `Next()` will first increment an index, and only then return a batch + // So `HasNext` should check if index can be incremented and still be within array + return bi.index+1 < len(bi.batches) +} + +func (bi *localBatchIterator) Close() { + bi.index = len(bi.batches) +} + +type cloudBatchIterator struct { + ctx context.Context + cfg *config.Config + startRowOffset int64 + pendingLinks Queue[cli_service.TSparkArrowResultLink] + downloadTasks Queue[cloudFetchDownloadTask] +} + +var _ BatchIterator = (*cloudBatchIterator)(nil) + +func (bi *cloudBatchIterator) Next() (SparkArrowBatch, error) { + for (bi.downloadTasks.Len() < bi.cfg.MaxDownloadThreads) && (bi.pendingLinks.Len() > 0) { + link := bi.pendingLinks.Dequeue() + logger.Debug().Msgf( + "CloudFetch: schedule link at offset %d row count %d", + link.StartRowOffset, + link.RowCount, + ) + + cancelCtx, cancelFn := context.WithCancel(bi.ctx) + task := &cloudFetchDownloadTask{ + ctx: cancelCtx, + cancel: cancelFn, + useLz4Compression: bi.cfg.UseLz4Compression, + link: link, + resultChan: make(chan cloudFetchDownloadTaskResult), + minTimeToExpiry: bi.cfg.MinTimeToExpiry, + } + task.Run() + bi.downloadTasks.Enqueue(task) + } + + task := bi.downloadTasks.Dequeue() + if task == nil { + return nil, io.EOF + } + + batch, err := task.GetResult() + + // once we've got an errored out task - cancel the remaining ones + if err != nil { + bi.Close() + return nil, err + } + + // explicitly call cancel function on successfully completed task to avoid context leak + task.cancel() + return batch, nil +} + +func (bi *cloudBatchIterator) HasNext() bool { + return (bi.pendingLinks.Len() > 0) || (bi.downloadTasks.Len() > 0) +} + +func (bi *cloudBatchIterator) Close() { + bi.pendingLinks.Clear() + for bi.downloadTasks.Len() > 0 { + task := bi.downloadTasks.Dequeue() + task.cancel() + } +} + +type cloudFetchDownloadTaskResult struct { + batch SparkArrowBatch + err error +} + +type cloudFetchDownloadTask struct { + ctx context.Context + cancel context.CancelFunc + useLz4Compression bool + minTimeToExpiry time.Duration + link *cli_service.TSparkArrowResultLink + resultChan chan cloudFetchDownloadTaskResult +} + +func (cft *cloudFetchDownloadTask) GetResult() (SparkArrowBatch, error) { + link := cft.link + + result, ok := <-cft.resultChan + if ok { + if result.err != nil { + logger.Debug().Msgf( + "CloudFetch: failed to download link at offset %d row count %d, reason: %s", + link.StartRowOffset, + link.RowCount, + result.err.Error(), + ) + return nil, result.err + } + logger.Debug().Msgf( + "CloudFetch: received data for link at offset %d row count %d", + link.StartRowOffset, + link.RowCount, + ) + return result.batch, nil + } + + // This branch should never be reached. 
+	// If you see this message, something went really wrong
+	logger.Debug().Msgf(
+		"CloudFetch: channel was closed before result was received; link at offset %d row count %d",
+		link.StartRowOffset,
+		link.RowCount,
+	)
+	return nil, nil
+}
+
+func (cft *cloudFetchDownloadTask) Run() {
+	go func() {
+		defer close(cft.resultChan)
+
+		logger.Debug().Msgf(
+			"CloudFetch: start downloading link at offset %d row count %d",
+			cft.link.StartRowOffset,
+			cft.link.RowCount,
+		)
+		data, err := fetchBatchBytes(cft.ctx, cft.link, cft.minTimeToExpiry)
+		if err != nil {
+			cft.resultChan <- cloudFetchDownloadTaskResult{batch: nil, err: err}
+			return
+		}
+
+		// io.ReadCloser.Close() may return an error, but in this case it should be safe to ignore (I hope so)
+		defer data.Close()
+
+		logger.Debug().Msgf(
+			"CloudFetch: reading records for link at offset %d row count %d",
+			cft.link.StartRowOffset,
+			cft.link.RowCount,
+		)
+		reader := getReader(data, cft.useLz4Compression)
+
+		records, err := getArrowRecords(reader, cft.link.StartRowOffset)
+		if err != nil {
+			cft.resultChan <- cloudFetchDownloadTaskResult{batch: nil, err: err}
+			return
+		}
+
+		batch := sparkArrowBatch{
+			Delimiter:    rowscanner.NewDelimiter(cft.link.StartRowOffset, cft.link.RowCount),
+			arrowRecords: records,
+		}
+		cft.resultChan <- cloudFetchDownloadTaskResult{batch: &batch, err: nil}
+	}()
+}
+
+func fetchBatchBytes(
+	ctx context.Context,
+	link *cli_service.TSparkArrowResultLink,
+	minTimeToExpiry time.Duration,
+) (io.ReadCloser, error) {
+	if isLinkExpired(link.ExpiryTime, minTimeToExpiry) {
+		return nil, errors.New(dbsqlerr.ErrLinkExpired)
+	}
+
+	// TODO: Retry on HTTP errors
+	req, err := http.NewRequestWithContext(ctx, "GET", link.FileLink, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	client := http.DefaultClient
+	res, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode != http.StatusOK {
+		msg := fmt.Sprintf("%s: %s %d", errArrowRowsCloudFetchDownloadFailure, "HTTP error", res.StatusCode)
+		return nil, dbsqlerrint.NewDriverError(ctx, msg, err)
+	}
+
+	return res.Body, nil
+}
+
+func getReader(r io.Reader, useLz4Compression bool) io.Reader {
+	if useLz4Compression {
+		return lz4.NewReader(r)
+	}
+	return r
+}
+
+func isLinkExpired(expiryTime int64, linkExpiryBuffer time.Duration) bool {
+	bufferSecs := int64(linkExpiryBuffer.Seconds())
+	return expiryTime-bufferSecs < time.Now().Unix()
+}
+
+func getArrowRecords(r io.Reader, startRowOffset int64) ([]SparkArrowRecord, error) {
+	ipcReader, err := ipc.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+
+	defer ipcReader.Release()
+
+	startRow := startRowOffset
+	var records []SparkArrowRecord
+	for ipcReader.Next() {
+		r := ipcReader.Record()
+		r.Retain()
+
+		sar := sparkArrowRecord{
+			Delimiter: rowscanner.NewDelimiter(startRow, r.NumRows()),
+			Record:    r,
+		}
+
+		records = append(records, &sar)
+
+		startRow += r.NumRows()
+	}
+
+	if ipcReader.Err() != nil {
+		for i := range records {
+			records[i].Release()
+		}
+		return nil, ipcReader.Err()
+	}
+
+	return records, nil
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/columnValues.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/columnValues.go
new file mode 100644
index 00000000..0b6fc7d8
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/columnValues.go
@@ -0,0 +1,573 @@
+package arrowbased
+
+import (
+	"encoding/json"
+	"strings"
+	"time"
+
+	"github.com/apache/arrow/go/v12/arrow"
"github.com/apache/arrow/go/v12/arrow/array" + "github.com/databricks/databricks-sql-go/internal/rows/rowscanner" + dbsqllog "github.com/databricks/databricks-sql-go/logger" + "github.com/pkg/errors" +) + +// Abstraction for holding the values for a set of rows +type RowValues interface { + rowscanner.Delimiter + Close() + NColumns() int + SetColumnValues(columnIndex int, values arrow.ArrayData) error + IsNull(columnIndex int, rowNumber int64) bool + Value(columnIndex int, rowNumber int64) (any, error) + SetDelimiter(d rowscanner.Delimiter) +} + +func NewRowValues(d rowscanner.Delimiter, holders []columnValues) RowValues { + return &rowValues{Delimiter: d, columnValueHolders: holders} +} + +type rowValues struct { + rowscanner.Delimiter + columnValueHolders []columnValues +} + +var _ RowValues = (*rowValues)(nil) + +func (rv *rowValues) Close() { + // release any retained arrow arrays + for i := range rv.columnValueHolders { + if rv.columnValueHolders[i] != nil { + rv.columnValueHolders[i].Release() + } + } +} + +func (rv *rowValues) SetColumnValues(columnIndex int, values arrow.ArrayData) error { + var err error + if columnIndex < len(rv.columnValueHolders) && rv.columnValueHolders[columnIndex] != nil { + rv.columnValueHolders[columnIndex].Release() + err = rv.columnValueHolders[columnIndex].SetValueArray(values) + } + return err +} + +func (rv *rowValues) IsNull(columnIndex int, rowNumber int64) bool { + var b bool = true + if columnIndex < len(rv.columnValueHolders) { + b = rv.columnValueHolders[columnIndex].IsNull(int(rowNumber - rv.Start())) + } + return b +} + +func (rv *rowValues) Value(columnIndex int, rowNumber int64) (any, error) { + var err error + var value any + if columnIndex < len(rv.columnValueHolders) { + value, err = rv.columnValueHolders[columnIndex].Value(int(rowNumber - rv.Start())) + } + return value, err +} + +func (rv *rowValues) NColumns() int { return len(rv.columnValueHolders) } + +func (rv *rowValues) SetDelimiter(d rowscanner.Delimiter) { rv.Delimiter = d } + +type valueContainerMaker interface { + makeColumnValuesContainers(ars *arrowRowScanner, d rowscanner.Delimiter) error +} + +// columnValues is the interface for accessing the values for a column +type columnValues interface { + Value(int) (any, error) + IsNull(int) bool + Release() + SetValueArray(colData arrow.ArrayData) error +} + +// a type constraint for the simple value types which can be handled by the generic +// implementation of columnValues +type valueTypes interface { + bool | + int8 | + int16 | + int32 | + int64 | + float32 | + float64 | + string | + []byte +} + +// a type constraint for the arrow array types which can be handled by the generic +// implementation of columnValues +type arrowArrayTypes interface { + *array.Boolean | + *array.Int8 | + *array.Int16 | + *array.Int32 | + *array.Int64 | + *array.Float32 | + *array.Float64 | + *array.String | + *array.Binary +} + +// type constraint for wrapping arrow arrays +type columnValuesHolder[T valueTypes] interface { + arrowArrayTypes + Value(int) T + IsNull(int) bool + Release() +} + +// a generic container for the arrow arrays/value types we handle +type columnValuesTyped[ValueHolderType columnValuesHolder[ValueType], ValueType valueTypes] struct { + holder ValueHolderType + foo ValueType +} + +// return the value for the specified row +func (cv *columnValuesTyped[X, T]) Value(rowNum int) (any, error) { + return cv.holder.Value(rowNum), nil +} + +// return true if the value at rowNum is null +func (cv *columnValuesTyped[X, T]) IsNull(rowNum 
int) bool {
+	return cv.holder.IsNull(rowNum)
+}
+
+// release the contained arrow array
+func (cv *columnValuesTyped[X, T]) Release() {
+	if cv.holder != nil {
+		cv.holder.Release()
+	}
+}
+
+func (cv *columnValuesTyped[X, T]) SetValueArray(colData arrow.ArrayData) error {
+	var colValsHolder columnValues = cv
+	switch t := any(cv.foo).(type) {
+	case bool:
+		colValsHolder.(*columnValuesTyped[*array.Boolean, bool]).holder = array.NewBooleanData(colData)
+
+	case int8:
+		colValsHolder.(*columnValuesTyped[*array.Int8, int8]).holder = array.NewInt8Data(colData)
+
+	case int16:
+		colValsHolder.(*columnValuesTyped[*array.Int16, int16]).holder = array.NewInt16Data(colData)
+
+	case int32:
+		colValsHolder.(*columnValuesTyped[*array.Int32, int32]).holder = array.NewInt32Data(colData)
+
+	case int64:
+		colValsHolder.(*columnValuesTyped[*array.Int64, int64]).holder = array.NewInt64Data(colData)
+
+	case float32:
+		colValsHolder.(*columnValuesTyped[*array.Float32, float32]).holder = array.NewFloat32Data(colData)
+
+	case float64:
+		colValsHolder.(*columnValuesTyped[*array.Float64, float64]).holder = array.NewFloat64Data(colData)
+
+	case string:
+		colValsHolder.(*columnValuesTyped[*array.String, string]).holder = array.NewStringData(colData)
+
+	case []byte:
+		colValsHolder.(*columnValuesTyped[*array.Binary, []byte]).holder = array.NewBinaryData(colData)
+
+	default:
+		return errors.New(errArrowRowsUnhandledArrowType(t))
+	}
+
+	return nil
+}
+
+// ensure the columnValuesTyped implements columnValues
+var _ columnValues = (*columnValuesTyped[*array.Int16, int16])(nil)
+
+type listValueContainer struct {
+	listArray     array.ListLike
+	values        columnValues
+	complexValue  bool
+	listArrayType *arrow.ListType
+}
+
+var _ columnValues = (*listValueContainer)(nil)
+
+func (lvc *listValueContainer) Value(i int) (any, error) {
+	if i < lvc.listArray.Len() {
+		r := "["
+		s, e := lvc.listArray.ValueOffsets(i)
+		len := int(e - s)
+
+		for i := 0; i < len; i++ {
+			if lvc.values.IsNull(i + int(s)) {
+				r = r + "null"
+			} else {
+
+				val, err := lvc.values.Value(i + int(s))
+				if err != nil {
+					return nil, err
+				}
+
+				if !lvc.complexValue {
+					vb, err := marshal(val)
+					if err != nil {
+						return nil, err
+					}
+					r = r + string(vb)
+				} else {
+					r = r + val.(string)
+				}
+			}
+
+			if i < len-1 {
+				r = r + ","
+			}
+		}
+
+		r = r + "]"
+		return r, nil
+	}
+	return nil, nil
+}
+
+func (lvc *listValueContainer) IsNull(i int) bool {
+	return lvc.listArray.IsNull(i)
+}
+
+func (lvc *listValueContainer) Release() {
+	if lvc.listArray != nil {
+		lvc.listArray.Release()
+	}
+
+	if lvc.values != nil {
+		lvc.values.Release()
+	}
+}
+
+func (lvc *listValueContainer) SetValueArray(colData arrow.ArrayData) error {
+	lvc.listArray = array.NewListData(colData)
+	lvs := lvc.listArray.ListValues()
+	err := lvc.values.SetValueArray(lvs.Data())
+
+	return err
+}
+
+type mapValueContainer struct {
+	mapArray     *array.Map
+	keys         columnValues
+	values       columnValues
+	complexValue bool
+	mapArrayType *arrow.MapType
+}
+
+var _ columnValues = (*mapValueContainer)(nil)
+
+func (mvc *mapValueContainer) Value(i int) (any, error) {
+	if i < mvc.mapArray.Len() {
+		s, e := mvc.mapArray.ValueOffsets(i)
+		len := e - s
+		r := "{"
+		for i := int64(0); i < len; i++ {
+			k, err := mvc.keys.Value(int(i + s))
+			if err != nil {
+				return nil, err
+			}
+
+			key, err := marshal(k)
+			if err != nil {
+				return nil, err
+			}
+
+			v, err := mvc.values.Value(int(i + s))
+			if err != nil {
+				return nil, err
+			}
+
+			var b string
+			if mvc.values.IsNull(int(i + s)) {
+				b = "null"
+			}
else if mvc.complexValue { + b = v.(string) + } else { + vb, err := marshal(v) + if err != nil { + return nil, err + } + b = string(vb) + } + + if !strings.HasPrefix(string(key), "\"") { + r = r + "\"" + string(key) + "\":" + } else { + r = r + string(key) + ":" + } + + r = r + b + if i < len-1 { + r = r + "," + } + } + r = r + "}" + + return r, nil + } + return nil, nil +} + +func (mvc *mapValueContainer) IsNull(i int) bool { + return mvc.mapArray.IsNull(i) +} + +func (mvc *mapValueContainer) Release() { + if mvc.mapArray != nil { + mvc.mapArray.Release() + } + + if mvc.values != nil { + mvc.values.Release() + } + + if mvc.keys != nil { + mvc.keys.Release() + } +} + +func (mvc *mapValueContainer) SetValueArray(colData arrow.ArrayData) error { + mvc.mapArray = array.NewMapData(colData) + err := mvc.values.SetValueArray(mvc.mapArray.Items().Data()) + if err != nil { + return err + } + err = mvc.keys.SetValueArray(mvc.mapArray.Keys().Data()) + + return err +} + +type structValueContainer struct { + structArray *array.Struct + fieldNames []string + complexValue []bool + fieldValues []columnValues + structArrayType *arrow.StructType +} + +var _ columnValues = (*structValueContainer)(nil) + +func (svc *structValueContainer) Value(i int) (any, error) { + if i < svc.structArray.Len() { + r := "{" + for j := range svc.fieldValues { + r = r + "\"" + svc.fieldNames[j] + "\":" + + if svc.fieldValues[j].IsNull(int(i)) { + r = r + "null" + } else { + v, err := svc.fieldValues[j].Value(int(i)) + if err != nil { + return nil, err + } + + var b string + if svc.complexValue[j] { + b = v.(string) + } else { + vb, err := marshal(v) + if err != nil { + return nil, err + } + b = string(vb) + } + + r = r + b + } + if j < len(svc.fieldValues)-1 { + r = r + "," + } + } + r = r + "}" + + return r, nil + } + return nil, nil +} + +func (svc *structValueContainer) IsNull(i int) bool { + return svc.structArray.IsNull(i) +} + +func (svc *structValueContainer) Release() { + if svc.structArray != nil { + svc.structArray.Release() + } + + for i := range svc.fieldValues { + if svc.fieldValues[i] != nil { + svc.fieldValues[i].Release() + } + } +} + +func (svc *structValueContainer) SetValueArray(colData arrow.ArrayData) error { + svc.structArray = array.NewStructData(colData) + for i := range svc.fieldValues { + err := svc.fieldValues[i].SetValueArray(svc.structArray.Field(i).Data()) + if err != nil { + return err + } + } + + return nil +} + +type dateValueContainer struct { + dateArray *array.Date32 + location *time.Location +} + +var _ columnValues = (*dateValueContainer)(nil) + +func (dvc *dateValueContainer) Value(i int) (any, error) { + d32 := dvc.dateArray.Value(i) + + val := d32.ToTime().In(dvc.location) + return val, nil +} + +func (dvc *dateValueContainer) IsNull(i int) bool { + return dvc.dateArray.IsNull(i) +} + +func (dvc *dateValueContainer) Release() { + if dvc.dateArray != nil { + dvc.dateArray.Release() + } +} + +func (dvc *dateValueContainer) SetValueArray(colData arrow.ArrayData) error { + dvc.dateArray = array.NewDate32Data(colData) + return nil +} + +type timestampValueContainer struct { + timeArray *array.Timestamp + location *time.Location + toTimestampFn func(arrow.Timestamp) time.Time +} + +var _ columnValues = (*timestampValueContainer)(nil) + +func (tvc *timestampValueContainer) Value(i int) (any, error) { + ats := tvc.timeArray.Value(i) + val := tvc.toTimestampFn(ats).In(tvc.location) + + return val, nil +} + +func (tvc *timestampValueContainer) IsNull(i int) bool { + return 
tvc.timeArray.IsNull(i)
+}
+
+func (tvc *timestampValueContainer) Release() {
+	if tvc.timeArray != nil {
+		tvc.timeArray.Release()
+	}
+}
+
+func (tvc *timestampValueContainer) SetValueArray(colData arrow.ArrayData) error {
+	tvc.timeArray = array.NewTimestampData(colData)
+	return nil
+}
+
+type timestampStringValueContainer struct {
+	timeStringArray *array.String
+	location        *time.Location
+	fieldName       string
+	*dbsqllog.DBSQLLogger
+}
+
+var _ columnValues = (*timestampStringValueContainer)(nil)
+
+func (tvc *timestampStringValueContainer) Value(i int) (any, error) {
+	sv := tvc.timeStringArray.Value(i)
+	val, err := rowscanner.HandleDateTime(sv, "TIMESTAMP", tvc.fieldName, tvc.location)
+	if err != nil {
+		tvc.Err(err).Msg(errArrowRowsDateTimeParse)
+	}
+
+	return val, nil
+}
+
+func (tvc *timestampStringValueContainer) IsNull(i int) bool {
+	return tvc.timeStringArray.IsNull(i)
+}
+
+func (tvc *timestampStringValueContainer) Release() {
+	if tvc.timeStringArray != nil {
+		tvc.timeStringArray.Release()
+	}
+}
+
+func (tvc *timestampStringValueContainer) SetValueArray(colData arrow.ArrayData) error {
+	tvc.timeStringArray = array.NewStringData(colData)
+	return nil
+}
+
+type decimal128Container struct {
+	decimalArray *array.Decimal128
+	scale        int32
+}
+
+var _ columnValues = (*decimal128Container)(nil)
+
+func (tvc *decimal128Container) Value(i int) (any, error) {
+	dv := tvc.decimalArray.Value(i)
+	fv := dv.ToFloat64(tvc.scale)
+	return fv, nil
+}
+
+func (tvc *decimal128Container) IsNull(i int) bool {
+	return tvc.decimalArray.IsNull(i)
+}
+
+func (tvc *decimal128Container) Release() {
+	if tvc.decimalArray != nil {
+		tvc.decimalArray.Release()
+	}
+}
+
+func (tvc *decimal128Container) SetValueArray(colData arrow.ArrayData) error {
+	tvc.decimalArray = array.NewDecimal128Data(colData)
+	return nil
+}
+
+func marshal(val any) ([]byte, error) {
+	if t, ok := val.(time.Time); ok {
+		s := "\"" + t.String() + "\""
+		return []byte(s), nil
+	}
+	vb, err := json.Marshal(val)
+	return vb, err
+}
+
+var nullContainer *nullContainer_ = &nullContainer_{}
+
+type nullContainer_ struct {
+}
+
+var _ columnValues = (*nullContainer_)(nil)
+
+func (tvc *nullContainer_) Value(i int) (any, error) {
+	return nil, nil
+}
+
+func (tvc *nullContainer_) IsNull(i int) bool {
+	return true
+}
+
+func (tvc *nullContainer_) Release() {
+}
+
+func (tvc *nullContainer_) SetValueArray(colData arrow.ArrayData) error {
+	return nil
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/errors.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/errors.go
new file mode 100644
index 00000000..04800601
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/errors.go
@@ -0,0 +1,37 @@
+package arrowbased
+
+import "fmt"
+
+var errArrowRowsNoArrowBatches = "databricks: result set contains 0 arrow batches"
+var errArrowRowsUnableToReadBatch = "databricks: unable to read arrow batch"
+var errArrowRowsNilArrowSchema = "databricks: nil arrow.Schema"
+var errArrowRowsUnableToWriteArrowSchema = "databricks: unable to write arrow schema"
+var errArrowRowsInvalidDecimalType = "databricks: decimal type with no scale/precision"
+var errArrowRowsUnknownDBType = "databricks: unknown data type when converting to arrow type"
+var errArrowRowsDateTimeParse = "databricks: arrow row scanner failed to parse date/time"
+var errArrowRowsConvertSchema = "databricks: arrow row scanner failed to convert schema"
+var errArrowRowsSerializeSchema =
"databricks: arrow row scanner failed to serialize schema" +var errArrowRowsToTimestampFn = "databricks: arrow row scanner failed getting toTimestamp function" +var errArrowRowsMakeColumnValueContainers = "databricks: failed creating column value container" +var errArrowRowsNotArrowFormat = "databricks: result set is not in arrow format" + +const errArrowRowsCloudFetchDownloadFailure = "cloud fetch batch loader failed to download results" + +func errArrowRowsUnsupportedNativeType(t string) string { + return fmt.Sprintf("databricks: arrow native values not yet supported for %s", t) +} +func errArrowRowsUnsupportedWithHiveSchema(t string) string { + return fmt.Sprintf("databricks: arrow native values for %s require arrow schema", t) +} +func errArrowRowsInvalidRowNumber(index int64) string { + return fmt.Sprintf("databricks: row number %d is not contained in any arrow batch", index) +} +func errArrowRowsUnableToCreateDecimalType(scale, precision int32) string { + return fmt.Sprintf("databricks: unable to create decimal type scale: %d, precision: %d", scale, precision) +} +func errArrowRowsUnhandledArrowType(t any) string { + return fmt.Sprintf("databricks: arrow row scanner unhandled type %s", t) +} +func errArrowRowsColumnValue(name string) string { + return fmt.Sprintf("databricks: arrow row scanner failed getting column value for %s", name) +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/queue.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/queue.go new file mode 100644 index 00000000..ed1d16f5 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/arrowbased/queue.go @@ -0,0 +1,51 @@ +package arrowbased + +import ( + "container/list" +) + +type Queue[ItemType any] interface { + Enqueue(item *ItemType) + Dequeue() *ItemType + Clear() + Len() int +} + +func NewQueue[ItemType any]() Queue[ItemType] { + return &queue[ItemType]{ + items: list.New(), + } +} + +type queue[ItemType any] struct { + items *list.List +} + +var _ Queue[any] = (*queue[any])(nil) + +func (q *queue[ItemType]) Enqueue(item *ItemType) { + q.items.PushBack(item) +} + +func (q *queue[ItemType]) Dequeue() *ItemType { + el := q.items.Front() + if el == nil { + return nil + } + q.items.Remove(el) + + value, ok := el.Value.(*ItemType) + if !ok { + return nil + } + + return value +} + +func (q *queue[ItemType]) Clear() { + q.items.Init() +} + +func (q *queue[ItemType]) Len() int { + return q.items.Len() +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/columnbased/columnRows.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/columnbased/columnRows.go new file mode 100644 index 00000000..f3ea6577 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/columnbased/columnRows.go @@ -0,0 +1,146 @@ +package columnbased + +import ( + "context" + "database/sql/driver" + "time" + + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/config" + dbsqlerr_int "github.com/databricks/databricks-sql-go/internal/errors" + "github.com/databricks/databricks-sql-go/internal/rows/rowscanner" + dbsqllog "github.com/databricks/databricks-sql-go/logger" + dbsqlrows "github.com/databricks/databricks-sql-go/rows" +) + +var errRowsParseDateTime = "databricks: column row scanner failed to parse date/time" + +// row scanner for query results in column based format +type 
columnRowScanner struct {
+	*dbsqllog.DBSQLLogger
+	// TRowSet with query results in column format
+	rowSet *cli_service.TRowSet
+	schema *cli_service.TTableSchema
+
+	location *time.Location
+	ctx      context.Context
+	rowscanner.Delimiter
+}
+
+var _ rowscanner.RowScanner = (*columnRowScanner)(nil)
+
+// NewColumnRowScanner returns a columnRowScanner initialized with the provided
+// values.
+func NewColumnRowScanner(schema *cli_service.TTableSchema, rowSet *cli_service.TRowSet, cfg *config.Config, logger *dbsqllog.DBSQLLogger, ctx context.Context) (rowscanner.RowScanner, dbsqlerr.DBError) {
+	if logger == nil {
+		logger = dbsqllog.Logger
+	}
+
+	var location *time.Location = time.UTC
+	if cfg != nil {
+		if cfg.Location != nil {
+			location = cfg.Location
+		}
+	}
+
+	logger.Debug().Msg("databricks: creating column row scanner")
+	rs := &columnRowScanner{
+		Delimiter:   rowscanner.NewDelimiter(rowSet.StartRowOffset, rowscanner.CountRows(rowSet)),
+		schema:      schema,
+		rowSet:      rowSet,
+		DBSQLLogger: logger,
+		location:    location,
+		ctx:         ctx,
+	}
+
+	return rs, nil
+}
+
+// Close is called when the Rows instance is closed.
+func (crs *columnRowScanner) Close() {}
+
+// NRows returns the number of rows in the current TRowSet
+func (crs *columnRowScanner) NRows() int64 {
+	if crs == nil {
+		return 0
+	}
+	return crs.Count()
+}
+
+// ScanRow is called to populate the provided slice with the
+// content of the current row. The provided slice will be the same
+// size as the number of columns.
+// The dest should not be written to outside of ScanRow. Care
+// should be taken when closing a RowScanner not to modify
+// a buffer held in dest.
+func (crs *columnRowScanner) ScanRow(
+	dest []driver.Value,
+	rowNumber int64) dbsqlerr.DBError {
+
+	rowIndex := rowNumber - crs.Start()
+	// populate the destination slice
+	for i := range dest {
+		val, err := crs.value(crs.rowSet.Columns[i], crs.schema.Columns[i], rowIndex)
+
+		if err != nil {
+			return err
+		}
+
+		dest[i] = val
+	}
+
+	return nil
+}
+
+// value retrieves the value for the specified column/row
+func (crs *columnRowScanner) value(tColumn *cli_service.TColumn, tColumnDesc *cli_service.TColumnDesc, rowNum int64) (val interface{}, err dbsqlerr.DBError) {
+	// default to UTC time
+	if crs.location == nil {
+		crs.location = time.UTC
+	}
+
+	// Database type name
+	dbtype := rowscanner.GetDBTypeName(tColumnDesc)
+
+	if tVal := tColumn.GetStringVal(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+		var err1 error
+		// DATE and TIMESTAMP are returned as strings so we need to handle that possibility
+		val, err1 = rowscanner.HandleDateTime(val, dbtype, tColumnDesc.ColumnName, crs.location)
+		if err1 != nil {
+			crs.Err(err1).Msg(errRowsParseDateTime)
+			err = dbsqlerr_int.NewDriverError(crs.ctx, errRowsParseDateTime, err1)
+		}
+	} else if tVal := tColumn.GetByteVal(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	} else if tVal := tColumn.GetI16Val(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	} else if tVal := tColumn.GetI32Val(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	} else if tVal := tColumn.GetI64Val(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	} else if tVal := tColumn.GetBoolVal(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	} else if tVal := tColumn.GetDoubleVal(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		if dbtype == "FLOAT" {
+			// database types FLOAT and DOUBLE are both returned as a float64;
+			// converting to a float32 is valid because the FLOAT type would have
+			// only been four bytes on the server
+			val = float32(tVal.Values[rowNum])
+		} else {
+			val = tVal.Values[rowNum]
+		}
+	} else if tVal := tColumn.GetBinaryVal(); tVal != nil && !rowscanner.IsNull(tVal.Nulls, rowNum) {
+		val = tVal.Values[rowNum]
+	}
+
+	return val, err
+}
+
+func (crs *columnRowScanner) GetArrowBatches(
+	ctx context.Context,
+	cfg config.Config,
+	rpi rowscanner.ResultPageIterator) (dbsqlrows.ArrowBatchIterator, error) {
+	return nil, dbsqlerr_int.NewDriverError(ctx, "databricks: result set is not in arrow format", nil)
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/errors.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/errors.go
new file mode 100644
index 00000000..19d27829
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/errors.go
@@ -0,0 +1,15 @@
+package rows
+
+import "fmt"
+
+var errRowsNoClient = "databricks: instance of Rows missing client"
+var errRowsNilRows = "databricks: nil Rows instance"
+var errRowsUnknowRowType = "databricks: unknown rows representation"
+var errRowsCloseFailed = "databricks: Rows instance Close operation failed"
+var errRowsMetadataFetchFailed = "databricks: Rows instance failed to retrieve result set metadata"
+var errRowsOnlyForward = "databricks: Rows instance can only iterate forward over rows"
+var errInvalidRowNumberState = "databricks: row number is in an invalid state"
+
+func errRowsInvalidColumnIndex(index int) string {
+	return fmt.Sprintf("databricks: invalid column index: %d", index)
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/rows.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rows.go
new file mode 100644
index 00000000..c9581e21
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rows.go
@@ -0,0 +1,546 @@
+package rows
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"io"
+	"math"
+	"reflect"
+	"time"
+
+	"github.com/databricks/databricks-sql-go/driverctx"
+	dbsqlerr "github.com/databricks/databricks-sql-go/errors"
+	"github.com/databricks/databricks-sql-go/internal/cli_service"
+	dbsqlclient "github.com/databricks/databricks-sql-go/internal/client"
+	"github.com/databricks/databricks-sql-go/internal/config"
+	dbsqlerr_int "github.com/databricks/databricks-sql-go/internal/errors"
+	"github.com/databricks/databricks-sql-go/internal/rows/arrowbased"
+	"github.com/databricks/databricks-sql-go/internal/rows/columnbased"
+	"github.com/databricks/databricks-sql-go/internal/rows/rowscanner"
+	dbsqllog "github.com/databricks/databricks-sql-go/logger"
+	dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+)
+
+// rows implements the following interfaces from database/sql/driver
+// Rows
+// RowsColumnTypeScanType
+// RowsColumnTypeDatabaseTypeName
+// RowsColumnTypeNullable
+// RowsColumnTypeLength
+type rows struct {
+	// The RowScanner is responsible for handling the different
+	// formats in which the query results can be returned
+	rowscanner.RowScanner
+	rowscanner.ResultPageIterator
+
+	// Handle for the associated database operation.
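+	// It identifies the server-side operation when further result pages are
+	// fetched and when the operation is closed.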
+	opHandle *cli_service.TOperationHandle
+
+	client cli_service.TCLIService
+
+	location *time.Location
+
+	// Metadata for result set
+	resultSetMetadata *cli_service.TGetResultSetMetadataResp
+	schema            *cli_service.TTableSchema
+
+	config *config.Config
+
+	// connId and correlationId are used for creating a context
+	// when accessing the server and when logging
+	connId        string
+	correlationId string
+
+	// Row number within the overall result set
+	nextRowNumber int64
+
+	logger_ *dbsqllog.DBSQLLogger
+
+	ctx context.Context
+}
+
+var _ driver.Rows = (*rows)(nil)
+var _ driver.RowsColumnTypeScanType = (*rows)(nil)
+var _ driver.RowsColumnTypeDatabaseTypeName = (*rows)(nil)
+var _ driver.RowsColumnTypeNullable = (*rows)(nil)
+var _ driver.RowsColumnTypeLength = (*rows)(nil)
+var _ dbsqlrows.Rows = (*rows)(nil)
+
+func NewRows(
+	connId string,
+	correlationId string,
+	opHandle *cli_service.TOperationHandle,
+	client cli_service.TCLIService,
+	config *config.Config,
+	directResults *cli_service.TSparkDirectResults,
+) (driver.Rows, dbsqlerr.DBError) {
+
+	var logger *dbsqllog.DBSQLLogger
+	var ctx context.Context
+	if opHandle != nil {
+		logger = dbsqllog.WithContext(connId, correlationId, dbsqlclient.SprintGuid(opHandle.OperationId.GUID))
+		ctx = driverctx.NewContextWithQueryId(driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), connId), correlationId), dbsqlclient.SprintGuid(opHandle.OperationId.GUID))
+	} else {
+		logger = dbsqllog.WithContext(connId, correlationId, "")
+		ctx = driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), connId), correlationId)
+	}
+
+	if client == nil {
+		logger.Error().Msg(errRowsNoClient)
+		return nil, dbsqlerr_int.NewDriverError(ctx, errRowsNoClient, nil)
+	}
+
+	var pageSize int64 = 10000
+	var location *time.Location = time.UTC
+	if config != nil {
+		pageSize = int64(config.MaxRows)
+
+		if config.Location != nil {
+			location = config.Location
+		}
+	}
+
+	logger.Debug().Msgf("databricks: creating Rows, pageSize: %d, location: %v", pageSize, location)
+
+	r := &rows{
+		client:        client,
+		opHandle:      opHandle,
+		connId:        connId,
+		correlationId: correlationId,
+		location:      location,
+		config:        config,
+		logger_:       logger,
+		ctx:           ctx,
+	}
+
+	// if we already have results for the query do some additional initialization
+	if directResults != nil {
+		logger.Debug().Msgf("databricks: creating Rows with direct results")
+		// set the result set metadata
+		if directResults.ResultSetMetadata != nil {
+			r.resultSetMetadata = directResults.ResultSetMetadata
+			r.schema = directResults.ResultSetMetadata.Schema
+		}
+
+		// initialize the row scanner
+		err := r.makeRowScanner(directResults.ResultSet)
+		if err != nil {
+			return r, err
+		}
+	}
+
+	var d rowscanner.Delimiter
+	if r.RowScanner != nil {
+		d = rowscanner.NewDelimiter(r.RowScanner.Start(), r.RowScanner.Count())
+	} else {
+		d = rowscanner.NewDelimiter(0, 0)
+	}
+
+	// If the entire query result set fits in direct results the server closes
+	// the operation.
+	closedOnServer := directResults != nil && directResults.CloseOperation != nil
+	r.ResultPageIterator = rowscanner.NewResultPageIterator(
+		d,
+		pageSize,
+		opHandle,
+		closedOnServer,
+		client,
+		connId,
+		correlationId,
+		r.logger(),
+	)
+
+	return r, nil
+}
+
+// Columns returns the names of the columns. The number of
+// columns of the result is inferred from the length of the
+// slice. If a particular column name isn't known, an empty
+// string should be returned for that entry.
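+// Here the names come from the TTableSchema in the result set metadata; if
+// the metadata cannot be retrieved an empty slice is returned.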
+func (r *rows) Columns() []string { + err := isValidRows(r) + if err != nil { + return []string{} + } + + schema, err := r.getResultSetSchema() + if err != nil { + return []string{} + } + + tColumns := schema.GetColumns() + colNames := make([]string, len(tColumns)) + + for i := range tColumns { + colNames[i] = tColumns[i].ColumnName + } + + return colNames +} + +// Close closes the rows iterator. +func (r *rows) Close() error { + if r == nil { + return nil + } + + if r.RowScanner != nil { + // make sure the row scanner frees up any resources + r.RowScanner.Close() + } + + if r.ResultPageIterator != nil { + r.logger().Debug().Msgf("databricks: closing Rows operation") + err := r.ResultPageIterator.Close() + if err != nil { + r.logger().Err(err).Msg(errRowsCloseFailed) + return dbsqlerr_int.NewRequestError(r.ctx, errRowsCloseFailed, err) + } + } + + return nil +} + +// Next is called to populate the next row of data into +// the provided slice. The provided slice will be the same +// size as the number of columns. +// +// Next should return io.EOF when there are no more rows. +// +// The dest should not be written to outside of Next. Care +// should be taken when closing Rows not to modify +// a buffer held in dest. +func (r *rows) Next(dest []driver.Value) error { + err := isValidRows(r) + if err != nil { + return err + } + + // if the next row is not in the current result page + // fetch the containing page + var b bool + var e error + if b, e = r.isNextRowInPage(); !b && e == nil { + err := r.fetchResultPage() + if err != nil { + return err + } + } + + if e != nil { + return e + } + + // Put values into the destination slice + err = r.ScanRow(dest, r.nextRowNumber) + if err != nil { + return err + } + + r.nextRowNumber++ + + return nil +} + +// ColumnTypeScanType returns column's native type +func (r *rows) ColumnTypeScanType(index int) reflect.Type { + err := isValidRows(r) + if err != nil { + return nil + } + + column, err := r.getColumnMetadataByIndex(index) + if err != nil { + return nil + } + + scanType := getScanType(column) + return scanType +} + +// ColumnTypeDatabaseTypeName returns column's database type name +func (r *rows) ColumnTypeDatabaseTypeName(index int) string { + err := isValidRows(r) + if err != nil { + return "" + } + + column, err := r.getColumnMetadataByIndex(index) + if err != nil { + return "" + } + + dbtype := rowscanner.GetDBTypeName(column) + + return dbtype +} + +// ColumnTypeNullable returns a flag indicating whether the column is nullable +// and an ok value of true if the status of the column is known. Otherwise +// a value of false is returned for ok. 
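+// This implementation does not track nullability, so it always reports
+// (false, false).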
+func (r *rows) ColumnTypeNullable(index int) (nullable, ok bool) { + return false, false +} + +func (r *rows) ColumnTypeLength(index int) (length int64, ok bool) { + columnInfo, err := r.getColumnMetadataByIndex(index) + if err != nil { + return 0, false + } + + typeName := rowscanner.GetDBTypeID(columnInfo) + + switch typeName { + case cli_service.TTypeId_STRING_TYPE, + cli_service.TTypeId_VARCHAR_TYPE, + cli_service.TTypeId_BINARY_TYPE, + cli_service.TTypeId_ARRAY_TYPE, + cli_service.TTypeId_MAP_TYPE, + cli_service.TTypeId_STRUCT_TYPE: + return math.MaxInt64, true + default: + return 0, false + } +} + +var ( + scanTypeNull = reflect.TypeOf(nil) + scanTypeBoolean = reflect.TypeOf(true) + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeString = reflect.TypeOf("") + scanTypeDateTime = reflect.TypeOf(time.Time{}) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(any)) +) + +func getScanType(column *cli_service.TColumnDesc) reflect.Type { + + // Currently all types are returned from the thrift server using + // the primitive entry + entry := column.TypeDesc.Types[0].PrimitiveEntry + + switch entry.Type { + case cli_service.TTypeId_BOOLEAN_TYPE: + return scanTypeBoolean + case cli_service.TTypeId_TINYINT_TYPE: + return scanTypeInt8 + case cli_service.TTypeId_SMALLINT_TYPE: + return scanTypeInt16 + case cli_service.TTypeId_INT_TYPE: + return scanTypeInt32 + case cli_service.TTypeId_BIGINT_TYPE: + return scanTypeInt64 + case cli_service.TTypeId_FLOAT_TYPE: + return scanTypeFloat32 + case cli_service.TTypeId_DOUBLE_TYPE: + return scanTypeFloat64 + case cli_service.TTypeId_NULL_TYPE: + return scanTypeNull + case cli_service.TTypeId_STRING_TYPE: + return scanTypeString + case cli_service.TTypeId_CHAR_TYPE: + return scanTypeString + case cli_service.TTypeId_VARCHAR_TYPE: + return scanTypeString + case cli_service.TTypeId_DATE_TYPE, cli_service.TTypeId_TIMESTAMP_TYPE: + return scanTypeDateTime + case cli_service.TTypeId_DECIMAL_TYPE, cli_service.TTypeId_BINARY_TYPE, cli_service.TTypeId_ARRAY_TYPE, + cli_service.TTypeId_STRUCT_TYPE, cli_service.TTypeId_MAP_TYPE, cli_service.TTypeId_UNION_TYPE: + return scanTypeRawBytes + case cli_service.TTypeId_USER_DEFINED_TYPE: + return scanTypeUnknown + case cli_service.TTypeId_INTERVAL_DAY_TIME_TYPE, cli_service.TTypeId_INTERVAL_YEAR_MONTH_TYPE: + return scanTypeString + default: + return scanTypeUnknown + } +} + +// isValidRows checks that the row instance is not nil +// and that it has a client +func isValidRows(r *rows) dbsqlerr.DBError { + var err dbsqlerr.DBError + if r == nil { + err = dbsqlerr_int.NewDriverError(context.Background(), errRowsNilRows, nil) + } else if r.client == nil { + err = dbsqlerr_int.NewDriverError(r.ctx, errRowsNoClient, nil) + r.logger().Err(err).Msg(errRowsNoClient) + } + + return err +} + +func (r *rows) getColumnMetadataByIndex(index int) (*cli_service.TColumnDesc, dbsqlerr.DBError) { + err := isValidRows(r) + if err != nil { + return nil, err + } + + schema, err := r.getResultSetSchema() + if err != nil { + return nil, err + } + + columns := schema.GetColumns() + if index < 0 || index >= len(columns) { + err = dbsqlerr_int.NewDriverError(r.ctx, errRowsInvalidColumnIndex(index), nil) + r.logger().Err(err).Msg(err.Error()) + return nil, err + } + + return columns[index], 
nil
+}
+
+// isNextRowInPage returns a boolean flag indicating whether
+// the next result set row is in the current result set page
+func (r *rows) isNextRowInPage() (bool, dbsqlerr.DBError) {
+	if r == nil || r.RowScanner == nil {
+		return false, nil
+	}
+
+	return r.RowScanner.Contains(r.nextRowNumber), nil
+}
+
+// getResultSetSchema does a one-time fetch of the result set schema
+func (r *rows) getResultSetSchema() (*cli_service.TTableSchema, dbsqlerr.DBError) {
+	if r.schema == nil {
+		err := isValidRows(r)
+		if err != nil {
+			return nil, err
+		}
+
+		req := cli_service.TGetResultSetMetadataReq{
+			OperationHandle: r.opHandle,
+		}
+		ctx := driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), r.connId), r.correlationId)
+
+		resp, err2 := r.client.GetResultSetMetadata(ctx, &req)
+		if err2 != nil {
+			r.logger().Err(err2).Msg(err2.Error())
+			// wrap the request error (err2); the validation error above is nil here
+			return nil, dbsqlerr_int.NewRequestError(r.ctx, errRowsMetadataFetchFailed, err2)
+		}
+
+		r.resultSetMetadata = resp
+		r.schema = resp.Schema
+	}
+
+	return r.schema, nil
+}
+
+// fetchResultPage will fetch the result page containing the next row, if necessary
+func (r *rows) fetchResultPage() error {
+	var err dbsqlerr.DBError = isValidRows(r)
+	if err != nil {
+		return err
+	}
+
+	if r.RowScanner != nil && r.RowScanner.Contains(r.nextRowNumber) {
+		return nil
+	}
+
+	if r.RowScanner != nil && r.nextRowNumber < r.RowScanner.Start() {
+		return dbsqlerr_int.NewDriverError(r.ctx, errRowsOnlyForward, nil)
+	}
+
+	// Close/release the existing row scanner before loading the next result page to
+	// help keep memory usage down.
+	if r.RowScanner != nil {
+		r.RowScanner.Close()
+		r.RowScanner = nil
+	}
+
+	if !r.ResultPageIterator.HasNext() {
+		return io.EOF
+	}
+
+	fetchResult, err1 := r.ResultPageIterator.Next()
+	if err1 != nil {
+		return err1
+	}
+
+	err1 = r.makeRowScanner(fetchResult)
+	if err1 != nil {
+		return err1
+	}
+
+	// We should be iterating over the rows so the next row number should be in the
+	// next result page
+	if !r.RowScanner.Contains(r.nextRowNumber) {
+		return dbsqlerr_int.NewDriverError(r.ctx, errInvalidRowNumberState, nil)
+	}
+
+	return nil
+}
+
+// makeRowScanner creates the embedded RowScanner instance based on the format
+// of the returned query results
+func (r *rows) makeRowScanner(fetchResults *cli_service.TFetchResultsResp) dbsqlerr.DBError {
+
+	schema, err1 := r.getResultSetSchema()
+	if err1 != nil {
+		return err1
+	}
+
+	if fetchResults == nil {
+		return nil
+	}
+
+	var rs rowscanner.RowScanner
+	var err dbsqlerr.DBError
+	if fetchResults.Results != nil {
+		if fetchResults.Results.Columns != nil {
+			rs, err = columnbased.NewColumnRowScanner(schema, fetchResults.Results, r.config, r.logger(), r.ctx)
+		} else if fetchResults.Results.ArrowBatches != nil || fetchResults.Results.ResultLinks != nil {
+			// both in-band arrow batches and cloud-fetch result links use the arrow scanner
+			rs, err = arrowbased.NewArrowRowScanner(r.resultSetMetadata, fetchResults.Results, r.config, r.logger(), r.ctx)
+		} else {
+			r.logger().Error().Msg(errRowsUnknowRowType)
+			err = dbsqlerr_int.NewDriverError(r.ctx, errRowsUnknowRowType, nil)
+		}
+	} else {
+		r.logger().Error().Msg(errRowsUnknowRowType)
+		err = dbsqlerr_int.NewDriverError(r.ctx, errRowsUnknowRowType, nil)
+	}
+
+	if r.RowScanner != nil {
+		r.RowScanner.Close()
+	}
+
+	r.RowScanner = rs
+
+	return err
+}
+
+func (r *rows) logger() *dbsqllog.DBSQLLogger {
+	if r.logger_ == nil {
+		if r.opHandle !=
nil { + r.logger_ = dbsqllog.WithContext(r.connId, r.correlationId, dbsqlclient.SprintGuid(r.opHandle.OperationId.GUID)) + } else { + r.logger_ = dbsqllog.WithContext(r.connId, r.correlationId, "") + } + } + return r.logger_ +} + +func (r *rows) GetArrowBatches(ctx context.Context) (dbsqlrows.ArrowBatchIterator, error) { + // update context with correlationId and connectionId which will be used in logging and errors + ctx = driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(ctx, r.connId), r.correlationId) + + // If a row scanner exists we use it to create the iterator, that way the iterator includes + // data returned as direct results + if r.RowScanner != nil { + return r.RowScanner.GetArrowBatches(ctx, *r.config, r.ResultPageIterator) + } + + return arrowbased.NewArrowRecordIterator(ctx, r.ResultPageIterator, nil, nil, *r.config), nil +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/resultPageIterator.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/resultPageIterator.go new file mode 100644 index 00000000..43d45bbb --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/resultPageIterator.go @@ -0,0 +1,311 @@ +package rowscanner + +import ( + "context" + "fmt" + "io" + + "github.com/databricks/databricks-sql-go/driverctx" + "github.com/databricks/databricks-sql-go/internal/cli_service" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + dbsqllog "github.com/databricks/databricks-sql-go/logger" +) + +var errRowsResultFetchFailed = "databricks: Rows instance failed to retrieve results" +var ErrRowsFetchPriorToStart = "databricks: unable to fetch row page prior to start of results" +var errRowsNilResultPageFetcher = "databricks: nil ResultPageFetcher instance" + +func errRowsUnandledFetchDirection(dir string) string { + return fmt.Sprintf("databricks: unhandled fetch direction %s", dir) +} + +// Interface for iterating over the pages in the result set of a query +type ResultPageIterator interface { + Next() (*cli_service.TFetchResultsResp, error) + HasNext() bool + Close() error + Delimiter +} + +// Define directions for seeking in the pages of a query result +type Direction int + +const ( + DirUnknown Direction = iota + DirNone + DirForward + DirBack +) + +var directionNames []string = []string{"Unknown", "None", "Forward", "Back"} + +func (d Direction) String() string { + return directionNames[d] +} + +// Create a new result page iterator. +func NewResultPageIterator( + delimiter Delimiter, + maxPageSize int64, + opHandle *cli_service.TOperationHandle, + closedOnServer bool, + client cli_service.TCLIService, + connectionId string, + correlationId string, + logger *dbsqllog.DBSQLLogger, +) ResultPageIterator { + + // delimiter and hasMoreRows are used to set up the point in the paginated + // result set that this iterator starts from. 
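+	// The returned iterator is a plain pull interface; a consumption
+	// sketch (this mirrors how rows.fetchResultPage drives it):
+	//
+	//	for it.HasNext() {
+	//		page, err := it.Next()
+	//		if err != nil {
+	//			break // io.EOF once the result set is exhausted
+	//		}
+	//		_ = page // *cli_service.TFetchResultsResp
+	//	}
+	//	_ = it.Close()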
+	return &resultPageIterator{
+		Delimiter:      delimiter,
+		isFinished:     closedOnServer,
+		maxPageSize:    maxPageSize,
+		opHandle:       opHandle,
+		closedOnServer: closedOnServer,
+		client:         client,
+		connectionId:   connectionId,
+		correlationId:  correlationId,
+		logger:         logger,
+	}
+}
+
+type resultPageIterator struct {
+	// Gives the parameters of the current result page
+	Delimiter
+
+	// indicates whether there are any more pages in the result set
+	isFinished bool
+
+	// max number of rows to fetch in a page
+	maxPageSize int64
+
+	// handle of the operation producing the result set
+	opHandle *cli_service.TOperationHandle
+
+	// If the server returns an entire result set
+	// in the direct results it may have already
+	// closed the operation.
+	closedOnServer bool
+
+	// client for communicating with the server
+	client cli_service.TCLIService
+
+	// connectionId to include in logging messages
+	connectionId string
+
+	// user provided value to include in logging messages
+	correlationId string
+
+	logger *dbsqllog.DBSQLLogger
+
+	// In some cases we don't know whether there are any records until we fetch
+	// the first result page. So our behaviour is to fetch a result page as necessary
+	// before Next() is called.
+	nextResultPage *cli_service.TFetchResultsResp
+
+	// Hold on to errors so they can be returned by Next()
+	err error
+}
+
+var _ ResultPageIterator = (*resultPageIterator)(nil)
+
+// Returns true if there are more pages in the result set.
+func (rpf *resultPageIterator) HasNext() bool {
+	if rpf.isFinished && rpf.nextResultPage == nil {
+		// There are no more pages to load and there isn't an already fetched
+		// page waiting to be retrieved by Next()
+		rpf.err = io.EOF
+		return false
+	}
+
+	// If there isn't an already fetched result page try to fetch one now
+	if rpf.nextResultPage == nil {
+		nrp, err := rpf.getNextPage()
+		if err != nil {
+			rpf.Close()
+			rpf.isFinished = true
+			rpf.err = err
+			return false
+		}
+
+		rpf.err = nil
+		rpf.nextResultPage = nrp
+		if !nrp.GetHasMoreRows() {
+			rpf.Close()
+		}
+	}
+
+	return rpf.nextResultPage != nil
+}
+
+// Returns the next page of the result set. io.EOF will be returned if there are
+// no more pages.
+func (rpf *resultPageIterator) Next() (*cli_service.TFetchResultsResp, error) {
+
+	if rpf == nil {
+		return nil, dbsqlerrint.NewDriverError(context.Background(), errRowsNilResultPageFetcher, nil)
+	}
+
+	if !rpf.HasNext() && rpf.nextResultPage == nil {
+		return nil, rpf.err
+	}
+
+	nrp := rpf.nextResultPage
+	rpf.nextResultPage = nil
+	return nrp, rpf.err
+}
+
+func (rpf *resultPageIterator) getNextPage() (*cli_service.TFetchResultsResp, error) {
+	if rpf.isFinished {
+		// no more result pages to fetch
+		return nil, io.EOF
+	}
+
+	// Starting row number of next result page. This is used to check that the returned page is
+	// the expected one.
+	nextPageStartRow := rpf.Start() + rpf.Count()
+
+	rpf.logger.Debug().Msgf("databricks: fetching result page for row %d", nextPageStartRow)
+	ctx := driverctx.NewContextWithCorrelationId(driverctx.NewContextWithConnId(context.Background(), rpf.connectionId), rpf.correlationId)
+
+	// Keep fetching in the appropriate direction until we have the expected page.
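+	// Worked example: if the previous page was delimited as (start=0,
+	// count=10000), nextPageStartRow is 10000; Contains(10000) is false
+	// and Direction(10000) is DirForward, so a single FETCH_NEXT request
+	// normally satisfies the loop below.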
+ var fetchResult *cli_service.TFetchResultsResp + var b bool + for b = rpf.Contains(nextPageStartRow); !b; b = rpf.Contains(nextPageStartRow) { + + direction := rpf.Direction(nextPageStartRow) + err := rpf.checkDirectionValid(ctx, direction) + if err != nil { + return nil, err + } + + rpf.logger.Debug().Msgf("fetching next batch of up to %d rows, %s", rpf.maxPageSize, direction.String()) + + var includeResultSetMetadata = true + req := cli_service.TFetchResultsReq{ + OperationHandle: rpf.opHandle, + MaxRows: rpf.maxPageSize, + Orientation: directionToSparkDirection(direction), + IncludeResultSetMetadata: &includeResultSetMetadata, + } + + fetchResult, err = rpf.client.FetchResults(ctx, &req) + if err != nil { + rpf.logger.Err(err).Msg("databricks: Rows instance failed to retrieve results") + return nil, dbsqlerrint.NewRequestError(ctx, errRowsResultFetchFailed, err) + } + + rpf.Delimiter = NewDelimiter(fetchResult.Results.StartRowOffset, CountRows(fetchResult.Results)) + if fetchResult.HasMoreRows != nil { + rpf.isFinished = !*fetchResult.HasMoreRows + } else { + rpf.isFinished = true + } + rpf.logger.Debug().Msgf("databricks: new result page startRow: %d, nRows: %v, hasMoreRows: %v", rpf.Start(), rpf.Count(), fetchResult.HasMoreRows) + } + + return fetchResult, nil +} + +func (rpf *resultPageIterator) Close() (err error) { + // if the operation hasn't already been closed on the server we + // need to do that now + if !rpf.closedOnServer { + rpf.closedOnServer = true + if rpf.client != nil { + req := cli_service.TCloseOperationReq{ + OperationHandle: rpf.opHandle, + } + + _, err = rpf.client.CloseOperation(context.Background(), &req) + return err + } + } + + return +} + +// countRows returns the number of rows in the TRowSet +func CountRows(rowSet *cli_service.TRowSet) int64 { + if rowSet == nil { + return 0 + } + + if rowSet.ArrowBatches != nil { + batches := rowSet.ArrowBatches + var n int64 + for i := range batches { + n += batches[i].RowCount + } + return n + } + + if rowSet.ResultLinks != nil { + links := rowSet.ResultLinks + var n int64 + for i := range links { + n += links[i].RowCount + } + return n + } + + if rowSet != nil && rowSet.Columns != nil { + // Find a column/values and return the number of values. + for _, col := range rowSet.Columns { + if col.BoolVal != nil { + return int64(len(col.BoolVal.Values)) + } + if col.ByteVal != nil { + return int64(len(col.ByteVal.Values)) + } + if col.I16Val != nil { + return int64(len(col.I16Val.Values)) + } + if col.I32Val != nil { + return int64(len(col.I32Val.Values)) + } + if col.I64Val != nil { + return int64(len(col.I64Val.Values)) + } + if col.StringVal != nil { + return int64(len(col.StringVal.Values)) + } + if col.DoubleVal != nil { + return int64(len(col.DoubleVal.Values)) + } + if col.BinaryVal != nil { + return int64(len(col.BinaryVal.Values)) + } + } + } + return 0 +} + +// Check if trying to fetch in the specified direction creates an error condition. 
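+// For example, given the Delimiter semantics from rowScanner.go:
+//
+//	d := NewDelimiter(10000, 10000) // covers rows 10000..19999
+//	d.Direction(20000)              // DirForward
+//	d.Direction(15000)              // DirNone (row already in page)
+//	d.Direction(500)                // DirBack (errors below only when Start() == 0)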
+func (rpf *resultPageIterator) checkDirectionValid(ctx context.Context, direction Direction) error { + if direction == DirBack { + // can't fetch rows previous to the start + if rpf.Start() == 0 { + return dbsqlerrint.NewDriverError(ctx, ErrRowsFetchPriorToStart, nil) + } + } else if direction == DirForward { + // can't fetch past the end of the query results + if rpf.isFinished { + return io.EOF + } + } else { + rpf.logger.Error().Msgf(errRowsUnandledFetchDirection(direction.String())) + return dbsqlerrint.NewDriverError(ctx, errRowsUnandledFetchDirection(direction.String()), nil) + } + return nil +} + +func directionToSparkDirection(d Direction) cli_service.TFetchOrientation { + switch d { + case DirBack: + return cli_service.TFetchOrientation_FETCH_PRIOR + default: + return cli_service.TFetchOrientation_FETCH_NEXT + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/rowScanner.go b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/rowScanner.go new file mode 100644 index 00000000..f2e39c16 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/internal/rows/rowscanner/rowScanner.go @@ -0,0 +1,189 @@ +package rowscanner + +import ( + "context" + "database/sql/driver" + "strings" + "time" + + dbsqlerr "github.com/databricks/databricks-sql-go/errors" + "github.com/databricks/databricks-sql-go/internal/cli_service" + "github.com/databricks/databricks-sql-go/internal/config" + dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors" + dbsqlrows "github.com/databricks/databricks-sql-go/rows" +) + +// RowScanner is an interface defining the behaviours that are specific to +// the formats in which query results can be returned. +type RowScanner interface { + Delimiter + // ScanRow is called to populate the provided slice with the + // content of the current row. The provided slice will be the same + // size as the number of columns. + // The dest should not be written to outside of ScanRow. Care + // should be taken when closing a RowScanner not to modify + // a buffer held in dest. 
+ ScanRow(dest []driver.Value, rowNumber int64) dbsqlerr.DBError + + // NRows returns the number of rows in the current result page + NRows() int64 + + // Close any open resources + Close() + + GetArrowBatches(ctx context.Context, cfg config.Config, rpi ResultPageIterator) (dbsqlrows.ArrowBatchIterator, error) +} + +// Expected formats for TIMESTAMP and DATE types when represented by a string value +var DateTimeFormats map[string]string = map[string]string{ + "TIMESTAMP": "2006-01-02 15:04:05.999999999", + "DATE": "2006-01-02", +} + +// IsNull return true if the bit at the provided position is set +func IsNull(nulls []byte, position int64) bool { + index := position / 8 + if int64(len(nulls)) > index { + b := nulls[index] + return (b & (1 << (uint)(position%8))) != 0 + } + return false +} + +type Delimiter interface { + Start() int64 + End() int64 + Count() int64 + Contains(int64) bool + Direction(int64) Direction +} + +func NewDelimiter(start, count int64) Delimiter { + return delimiter{ + start: start, + count: count, + end: start + count - 1, + } +} + +type delimiter struct { + start int64 + end int64 + count int64 +} + +func (d delimiter) Start() int64 { return d.start } +func (d delimiter) End() int64 { return d.end } +func (d delimiter) Count() int64 { return d.count } +func (d delimiter) Contains(i int64) bool { return d.count > 0 && i >= d.start && i <= d.end } +func (d delimiter) Direction(i int64) Direction { + + if d.Contains(i) { + return DirNone + } else if i < d.Start() { + return DirBack + } else if i > d.End() { + return DirForward + } else if d.Count() == 0 { + return DirForward + } else { + return DirUnknown + } +} + +var ErrRowsParseValue = "databricks: unable to parse %s value '%v' from column %s" + +// handleDateTime will convert the passed val to a time.Time value if necessary +func HandleDateTime(val any, dbType, columnName string, location *time.Location) (result any, err error) { + result = val + // if there is a date/time format corresponding to the column type we need to + // convert to time.Time + if format, ok := DateTimeFormats[dbType]; ok { + result, err = parseInLocation(format, val.(string), location) + if err != nil { + err = dbsqlerrint.WrapErrf(err, ErrRowsParseValue, dbType, val, columnName) + } + } + + return result, err +} + +// parseInLocation parses a date/time string in the given format and using the provided +// location. +// This is, essentially, a wrapper around time.ParseInLocation to handle negative year +// values +func parseInLocation(format, dateTimeString string, loc *time.Location) (time.Time, error) { + // we want to handle dates with negative year values and currently we only + // support formats that start with the year so we can just strip a leading minus + // sign + var isNegative bool + dateTimeString, isNegative = stripLeadingNegative(dateTimeString) + + date, err := time.ParseInLocation(format, dateTimeString, loc) + if err != nil { + return time.Time{}, err + } + + if isNegative { + date = date.AddDate(-2*date.Year(), 0, 0) + } + + return date, nil +} + +// stripLeadingNegative will remove a leading ascii or unicode minus +// if present. 
The possibly shortened string is returned and a flag indicating if
+// the string was altered
+func stripLeadingNegative(dateTimeString string) (string, bool) {
+	if dateStartsWithNegative(dateTimeString) {
+		// strip leading rune from dateTimeString
+		// using range because it is supposed to be faster than utf8.DecodeRuneInString
+		for i := range dateTimeString {
+			if i > 0 {
+				return dateTimeString[i:], true
+			}
+		}
+	}
+
+	return dateTimeString, false
+}
+
+// ISO 8601 allows for both the ascii and unicode characters for minus
+const (
+	// unicode minus sign
+	uMinus string = "\u2212"
+	// ascii hyphen/minus
+	aMinus string = "\x2D"
+)
+
+// dateStartsWithNegative returns true if the date string starts with
+// a minus sign
+func dateStartsWithNegative(val string) bool {
+	return strings.HasPrefix(val, aMinus) || strings.HasPrefix(val, uMinus)
+}
+
+// GetDBTypeName returns the database type name from a TColumnDesc
+func GetDBTypeName(column *cli_service.TColumnDesc) string {
+	entry := column.TypeDesc.Types[0].PrimitiveEntry
+	dbtype := strings.TrimSuffix(entry.Type.String(), "_TYPE")
+
+	return dbtype
+}
+
+func GetDBType(column *cli_service.TColumnDesc) cli_service.TTypeId {
+	entry := column.TypeDesc.Types[0].PrimitiveEntry
+	return entry.Type
+}
+
+// GetDBTypeID returns the database type ID from a TColumnDesc
+func GetDBTypeID(column *cli_service.TColumnDesc) cli_service.TTypeId {
+	// currently the thrift server returns all types using the primitive entry
+	entry := column.TypeDesc.Types[0].PrimitiveEntry
+	return entry.Type
+}
+
+// GetDBTypeQualifiers returns the TTypeQualifiers from a TColumnDesc.
+// Return value may be nil.
+func GetDBTypeQualifiers(column *cli_service.TColumnDesc) *cli_service.TTypeQualifiers {
+	return column.TypeDesc.Types[0].PrimitiveEntry.TypeQualifiers
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/internal/sentinel/sentinel.go b/vendor/github.com/databricks/databricks-sql-go/internal/sentinel/sentinel.go
new file mode 100644
index 00000000..529f2299
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/internal/sentinel/sentinel.go
@@ -0,0 +1,138 @@
+package sentinel
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/databricks/databricks-sql-go/internal/client"
+	"github.com/pkg/errors"
+)
+
+const (
+	DEFAULT_TIMEOUT  = 0 // no timeout
+	DEFAULT_INTERVAL = 100 * time.Millisecond
+)
+
+type WatchStatus int
+
+const (
+	WatchSuccess WatchStatus = iota
+	WatchErr
+	WatchExecuting
+	WatchTimeout
+	WatchCanceled
+)
+
+func (s WatchStatus) String() string {
+	switch s {
+	case WatchSuccess:
+		return "SUCCESS"
+	case WatchErr:
+		return "ERROR"
+	case WatchExecuting:
+		return "EXECUTING"
+	case WatchCanceled:
+		return "CANCELED"
+	case WatchTimeout:
+		return "TIMEOUT"
+	}
+	return ""
+}
+
+type Done func() bool
+
+type Sentinel struct {
+	StatusFn         func() (doneFn Done, statusResp any, err error)
+	OnCancelFn       func() (onCancelFnResp any, err error)
+	OnDoneFn         func(statusResp any) (onDoneFnResp any, err error)
+	onCancelFnCalled bool
+}
+
+// Watch checks the status of something on a given interval, up to a timeout.
+// The StatusFn check will continue until the given Done function returns true or StatusFn returns an error.
+// Context cancellation is supported, in which case Watch returns a WatchCanceled status.
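+//
+// A minimal polling sketch (checkOperation is a hypothetical status call):
+//
+//	s := Sentinel{
+//		StatusFn: func() (Done, any, error) {
+//			finished, err := checkOperation()
+//			return func() bool { return finished }, finished, err
+//		},
+//	}
+//	status, resp, err := s.Watch(ctx, 100*time.Millisecond, time.Minute)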
+func (s Sentinel) Watch(ctx context.Context, interval, timeout time.Duration) (WatchStatus, any, error) { + if s.StatusFn == nil { + s.StatusFn = func() (Done, any, error) { return func() bool { return true }, nil, nil } + } + if timeout == 0 { + timeout = DEFAULT_TIMEOUT + } + if interval == 0 { + interval = DEFAULT_INTERVAL + } + + var timeoutTimerCh <-chan time.Time + if timeout != 0 { + timeoutTimer := time.NewTimer(timeout) + timeoutTimerCh = timeoutTimer.C + defer timeoutTimer.Stop() + } + + intervalTimer := time.NewTimer(interval) + defer intervalTimer.Stop() + + resCh := make(chan any, 1) + errCh := make(chan error, 1) + processor := func(statusResp any) { + ret, err := s.OnDoneFn(statusResp) + if err != nil { + errCh <- err + } else { + resCh <- ret + } + } + + log, _ := client.LoggerAndContext(ctx, nil) + + // If the watch times out or is cancelled this function + // will stop the interval timer and call the cancel function + // if necessary. + timeoutOrCancel := func() { + _ = intervalTimer.Stop() + if s.OnCancelFn != nil && !s.onCancelFnCalled { + s.onCancelFnCalled = true + _, err := s.OnCancelFn() + if err != nil { + log.Err(err).Msg("databricks: cancel failed") + } else { + log.Debug().Msgf("databricks: cancel success") + } + } + } + + for { + select { + case <-intervalTimer.C: + done, statusResp, err := s.StatusFn() + if err != nil { + return WatchErr, statusResp, err + } + // resetting it here so statusFn is called again after interval time + _ = intervalTimer.Reset(interval) + if done() { + intervalTimer.Stop() + if s.OnDoneFn != nil { + go processor(statusResp) + } else { + return WatchSuccess, statusResp, nil + } + } + case err := <-errCh: + return WatchErr, nil, err + case res := <-resCh: + return WatchSuccess, res, nil + case <-ctx.Done(): + log.Debug().Msgf("sentinel <-ctx.Done: %s", ctx.Err().Error()) + timeoutOrCancel() + return WatchCanceled, nil, ctx.Err() + case <-timeoutTimerCh: + msg := fmt.Sprintf("wait timed out after %s", timeout.String()) + log.Info().Msg(msg) + timeoutOrCancel() + err := errors.New(msg) + return WatchTimeout, nil, err + } + } +} diff --git a/vendor/github.com/databricks/databricks-sql-go/logger/logger.go b/vendor/github.com/databricks/databricks-sql-go/logger/logger.go new file mode 100644 index 00000000..683501a1 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/logger/logger.go @@ -0,0 +1,139 @@ +package logger + +import ( + "io" + "os" + "runtime" + "time" + + "github.com/mattn/go-isatty" + "github.com/rs/zerolog" +) + +type DBSQLLogger struct { + zerolog.Logger +} + +// Track is a simple utility function to use with logger to log a message with a timestamp. +// Recommended to use in conjunction with Duration. +// +// For example: +// +// msg, start := log.Track("Run operation") +// defer log.Duration(msg, start) +func (l *DBSQLLogger) Track(msg string) (string, time.Time) { + return msg, time.Now() +} + +// Duration logs a debug message with the time elapsed between the provided start and the current time. +// Use in conjunction with Track. +// +// For example: +// +// msg, start := log.Track("Run operation") +// defer log.Duration(msg, start) +func (l *DBSQLLogger) Duration(msg string, start time.Time) { + l.Debug().Msgf("%v elapsed time: %v", msg, time.Since(start)) +} + +var Logger = &DBSQLLogger{ + zerolog.New(os.Stderr).With().Timestamp().Logger(), +} + +// Enable pretty printing for interactive terminals and json for production. 
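+//
+// The level defaults to warn and can be overridden either via the
+// DATABRICKS_LOG_LEVEL environment variable (read in init below) or
+// programmatically:
+//
+//	if err := logger.SetLogLevel("debug"); err != nil {
+//		// unrecognized level name
+//	}
+//	logger.SetLogOutput(os.Stderr)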
+func init() { + // for tty terminal enable pretty logs + if isatty.IsTerminal(os.Stdout.Fd()) && runtime.GOOS != "windows" { + Logger = &DBSQLLogger{Logger.Output(zerolog.ConsoleWriter{Out: os.Stderr})} + } + // by default only log warns or above + loglvl := zerolog.WarnLevel + if lvst := os.Getenv("DATABRICKS_LOG_LEVEL"); lvst != "" { + if lv, err := zerolog.ParseLevel(lvst); err != nil { + Logger.Error().Msgf("log level %s not recognized", lvst) + } else { + loglvl = lv + } + } + Logger.Logger = Logger.Level(loglvl) + Logger.Info().Msgf("setting log level to %s", loglvl) +} + +// Sets log level. Default is "warn" +// Available levels are: "trace" "debug" "info" "warn" "error" "fatal" "panic" or "disabled" +func SetLogLevel(l string) error { + if lv, err := zerolog.ParseLevel(l); err != nil { + return err + } else { + Logger.Logger = Logger.Level(lv) + return nil + } +} + +// Sets logging output. Default is os.Stderr. If in terminal, pretty logs are enabled. +func SetLogOutput(w io.Writer) { + Logger.Logger = Logger.Output(w) +} + +// Sets log to trace. -1 +// You must call Msg on the returned event in order to send the event. +func Trace() *zerolog.Event { + return Logger.Trace() +} + +// Sets log to debug. 0 +// You must call Msg on the returned event in order to send the event. +func Debug() *zerolog.Event { + return Logger.Debug() +} + +// Sets log to info. 1 +// You must call Msg on the returned event in order to send the event. +func Info() *zerolog.Event { + return Logger.Info() +} + +// Sets log to warn. 2 +// You must call Msg on the returned event in order to send the event. +func Warn() *zerolog.Event { + return Logger.Warn() +} + +// Sets log to error. 3 +// You must call Msg on the returned event in order to send the event. +func Error() *zerolog.Event { + return Logger.Error() +} + +// Sets log to fatal. 4 +// You must call Msg on the returned event in order to send the event. +func Fatal() *zerolog.Event { + return Logger.Fatal() +} + +// Sets log to panic. 5 +// You must call Msg on the returned event in order to send the event. +func Panic() *zerolog.Event { + return Logger.Panic() +} + +// Err starts a new message with error level with err as a field if not nil or with info level if err is nil. +// You must call Msg on the returned event in order to send the event. +func Err(err error) *zerolog.Event { + return Logger.Err(err) +} + +// WithContext sets connectionId, correlationId, and queryId to be used as fields. +func WithContext(connectionId string, correlationId string, queryId string) *DBSQLLogger { + return &DBSQLLogger{Logger.With().Str("connId", connectionId).Str("corrId", correlationId).Str("queryId", queryId).Logger()} +} + +// Track is a convenience function to track time spent +func Track(msg string) (string, time.Time) { + return msg, time.Now() +} + +// Duration is a convenience function to log elapsed time. 
Often used with Track +func Duration(msg string, start time.Time) { + Logger.Debug().Msgf("%v elapsed time: %v", msg, time.Since(start)) +} diff --git a/vendor/github.com/databricks/databricks-sql-go/parameters.go b/vendor/github.com/databricks/databricks-sql-go/parameters.go new file mode 100644 index 00000000..0652d80b --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/parameters.go @@ -0,0 +1,209 @@ +package dbsql + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/databricks/databricks-sql-go/internal/cli_service" +) + +type Parameter struct { + Name string + Type SqlType + Value any +} + +type SqlType int + +const ( + SqlUnkown SqlType = iota + SqlString + SqlDate + SqlTimestamp + SqlFloat + SqlDecimal + SqlDouble + SqlInteger + SqlBigInt + SqlSmallInt + SqlTinyInt + SqlBoolean + SqlIntervalMonth + SqlIntervalDay + SqlVoid +) + +func (s SqlType) String() string { + switch s { + case SqlString: + return "STRING" + case SqlDate: + return "DATE" + case SqlTimestamp: + return "TIMESTAMP" + case SqlFloat: + return "FLOAT" + case SqlDecimal: + return "DECIMAL" + case SqlDouble: + return "DOUBLE" + case SqlInteger: + return "INTEGER" + case SqlBigInt: + return "BIGINT" + case SqlSmallInt: + return "SMALLINT" + case SqlTinyInt: + return "TINYINT" + case SqlBoolean: + return "BOOLEAN" + case SqlIntervalMonth: + return "INTERVAL MONTH" + case SqlIntervalDay: + return "INTERVAL DAY" + case SqlVoid: + return "VOID" + } + return "unknown" +} + +func valuesToParameters(namedValues []driver.NamedValue) []Parameter { + var params []Parameter + for i := range namedValues { + newParam := *new(Parameter) + namedValue := namedValues[i] + param, ok := namedValue.Value.(Parameter) + if ok { + newParam.Name = param.Name + newParam.Value = param.Value + newParam.Type = param.Type + } else { + newParam.Name = namedValue.Name + newParam.Value = namedValue.Value + } + params = append(params, newParam) + } + return params +} + +func inferTypes(params []Parameter) { + for i := range params { + param := ¶ms[i] + if param.Type == SqlUnkown { + inferType(param) + } + } +} + +func inferType(param *Parameter) { + if param.Value != nil && reflect.ValueOf(param.Value).Kind() == reflect.Ptr { + param.Value = reflect.ValueOf(param.Value).Elem().Interface() + inferType(param) + return + } + + switch value := param.Value.(type) { + case bool: + param.Value = strconv.FormatBool(value) + param.Type = SqlBoolean + case string: + param.Value = value + param.Type = SqlString + case int: + param.Value = strconv.Itoa(value) + param.Type = SqlInteger + case uint: + param.Value = strconv.FormatUint(uint64(value), 10) + param.Type = SqlInteger + case int8: + param.Value = strconv.Itoa(int(value)) + param.Type = SqlInteger + case uint8: + param.Value = strconv.FormatUint(uint64(value), 10) + param.Type = SqlInteger + case int16: + param.Value = strconv.Itoa(int(value)) + param.Type = SqlInteger + case uint16: + param.Value = strconv.FormatUint(uint64(value), 10) + param.Type = SqlInteger + case int32: + param.Value = strconv.Itoa(int(value)) + param.Type = SqlInteger + case uint32: + param.Value = strconv.FormatUint(uint64(value), 10) + param.Type = SqlInteger + case int64: + param.Value = strconv.Itoa(int(value)) + param.Type = SqlInteger + case uint64: + param.Value = strconv.FormatUint(uint64(value), 10) + param.Type = SqlInteger + case float32: + param.Value = strconv.FormatFloat(float64(value), 'f', -1, 32) + param.Type = SqlFloat + case float64: + param.Value = 
strconv.FormatFloat(float64(value), 'f', -1, 64) + param.Type = SqlFloat + case time.Time: + param.Value = value.Format(time.RFC3339Nano) + param.Type = SqlTimestamp + case nil: + param.Value = nil + param.Type = SqlVoid + default: + s := fmt.Sprintf("%s", param.Value) + param.Value = s + param.Type = SqlString + } +} + +func convertNamedValuesToSparkParams(values []driver.NamedValue) []*cli_service.TSparkParameter { + var sparkParams []*cli_service.TSparkParameter + + sqlParams := valuesToParameters(values) + inferTypes(sqlParams) + for i := range sqlParams { + sqlParam := sqlParams[i] + sparkValue := new(cli_service.TSparkParameterValue) + if sqlParam.Type == SqlVoid { + sparkValue = nil + } else { + stringValue := sqlParam.Value.(string) + sparkValue = &cli_service.TSparkParameterValue{StringValue: &stringValue} + } + + var sparkParamType string + if sqlParam.Type == SqlDecimal { + sparkParamType = inferDecimalType(sparkValue.GetStringValue()) + } else { + sparkParamType = sqlParam.Type.String() + } + sparkParam := cli_service.TSparkParameter{Name: &sqlParam.Name, Type: &sparkParamType, Value: sparkValue} + sparkParams = append(sparkParams, &sparkParam) + } + return sparkParams +} + +func inferDecimalType(d string) (t string) { + var overall int + var after int + if strings.HasPrefix(d, "0.") { + // Less than one + overall = len(d) - 2 + after = len(d) - 2 + } else if !strings.Contains(d, ".") { + // Less than one + overall = len(d) + after = 0 + } else { + components := strings.Split(d, ".") + overall, after = len(components[0])+len(components[1]), len(components[1]) + } + + return fmt.Sprintf("DECIMAL(%d,%d)", overall, after) +} diff --git a/vendor/github.com/databricks/databricks-sql-go/result.go b/vendor/github.com/databricks/databricks-sql-go/result.go new file mode 100644 index 00000000..bb8bfa59 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/result.go @@ -0,0 +1,21 @@ +package dbsql + +import "database/sql/driver" + +type result struct { + AffectedRows int64 + InsertId int64 +} + +var _ driver.Result = (*result)(nil) + +// LastInsertId returns the database's auto-generated ID after an insert into a table. +// This is currently not really implemented for this driver and will always return 0. +func (res *result) LastInsertId() (int64, error) { + return res.InsertId, nil +} + +// RowsAffected returns the number of rows affected by the query. +func (res *result) RowsAffected() (int64, error) { + return res.AffectedRows, nil +} diff --git a/vendor/github.com/databricks/databricks-sql-go/rows/rows.go b/vendor/github.com/databricks/databricks-sql-go/rows/rows.go new file mode 100644 index 00000000..c3d93cc7 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/rows/rows.go @@ -0,0 +1,23 @@ +package rows + +import ( + "context" + + "github.com/apache/arrow/go/v12/arrow" +) + +type Rows interface { + GetArrowBatches(context.Context) (ArrowBatchIterator, error) +} + +type ArrowBatchIterator interface { + // Retrieve the next arrow.Record. + // Will return io.EOF if there are no more records + Next() (arrow.Record, error) + + // Return true if the iterator contains more batches, false otherwise. + HasNext() bool + + // Release any resources in use by the iterator. 
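+	//
+	// A typical iteration sketch (the concrete driver rows value is
+	// type-asserted to this package's Rows interface first):
+	//
+	//	it, _ := r.(Rows).GetArrowBatches(ctx)
+	//	for it.HasNext() {
+	//		rec, err := it.Next()
+	//		if err != nil {
+	//			break // io.EOF at the end of the result set
+	//		}
+	//		// ... use rec, then release it
+	//		rec.Release()
+	//	}
+	//	it.Close()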
+	Close()
+}
diff --git a/vendor/github.com/databricks/databricks-sql-go/statement.go b/vendor/github.com/databricks/databricks-sql-go/statement.go
new file mode 100644
index 00000000..4bef5171
--- /dev/null
+++ b/vendor/github.com/databricks/databricks-sql-go/statement.go
@@ -0,0 +1,59 @@
+package dbsql
+
+import (
+	"context"
+	"database/sql/driver"
+
+	dbsqlerr "github.com/databricks/databricks-sql-go/errors"
+	dbsqlerrint "github.com/databricks/databricks-sql-go/internal/errors"
+)
+
+type stmt struct {
+	conn  *conn
+	query string
+}
+
+// Close closes the statement.
+func (s *stmt) Close() error {
+	// no-op
+	return nil
+}
+
+// NumInput returns -1 so the sql package will not sanity check Exec or Query argument counts.
+func (s *stmt) NumInput() int {
+	return -1
+}
+
+// Exec is not implemented.
+//
+// Deprecated: Use StmtExecContext instead.
+func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {
+	return nil, dbsqlerrint.NewDriverError(context.TODO(), dbsqlerr.ErrNotImplemented, nil)
+}
+
+// Query is not implemented.
+//
+// Deprecated: Use StmtQueryContext instead.
+func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {
+	return nil, dbsqlerrint.NewDriverError(context.TODO(), dbsqlerr.ErrNotImplemented, nil)
+}
+
+// ExecContext executes a query that doesn't return rows, such
+// as an INSERT or UPDATE.
+//
+// ExecContext honors the context timeout and returns when it is canceled.
+// Statement ExecContext is the same as connection ExecContext
+func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+	return s.conn.ExecContext(ctx, s.query, args)
+}
+
+// QueryContext executes a query that may return rows, such as a
+// SELECT.
+//
+// QueryContext honors the context timeout and returns when it is canceled.
+// Statement QueryContext is the same as connection QueryContext +func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + return s.conn.QueryContext(ctx, s.query, args) +} + +var _ driver.Stmt = (*stmt)(nil) diff --git a/vendor/github.com/databricks/databricks-sql-go/testserver.go b/vendor/github.com/databricks/databricks-sql-go/testserver.go new file mode 100644 index 00000000..c5648e17 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/testserver.go @@ -0,0 +1,81 @@ +package dbsql + +import ( + "net/http" + "net/http/httptest" + + "github.com/apache/thrift/lib/go/thrift" + "github.com/databricks/databricks-sql-go/internal/cli_service" +) + +type thriftHandler struct { + processor thrift.TProcessor + inPfactory, outPfactory thrift.TProtocolFactory + count503_2_retries int + count503_5_retries int + count429_2_retries int +} + +func (h *thriftHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch r.RequestURI { + case "/503-2-retries": + if h.count503_2_retries <= 1 { + w.WriteHeader(http.StatusServiceUnavailable) + h.count503_2_retries++ + return + } else { + h.count503_2_retries = 0 + } + case "/429-2-retries": + if h.count429_2_retries <= 1 { + + w.Header().Add("Retry-After", "1") + w.WriteHeader(http.StatusTooManyRequests) + h.count429_2_retries++ + return + } else { + h.count429_2_retries = 0 + } + case "/503-5-retries": + if h.count503_5_retries <= 5 { + w.WriteHeader(http.StatusServiceUnavailable) + h.count503_5_retries++ + return + } else { + h.count503_5_retries = 0 + } + case "/429-5-retries": + if h.count503_5_retries <= 5 { + w.Header().Set("Retry-After", "12") + w.WriteHeader(http.StatusTooManyRequests) + h.count503_5_retries++ + return + } else { + h.count503_5_retries = 0 + } + } + + thriftHandler := thrift.NewThriftHandlerFunc(h.processor, h.inPfactory, h.outPfactory) + thriftHandler(w, r) +} + +func initThriftTestServer(handler cli_service.TCLIService) *httptest.Server { + + tcfg := &thrift.TConfiguration{ + TLSConfig: nil, + } + + protocolFactory := thrift.NewTBinaryProtocolFactoryConf(tcfg) + + processor := cli_service.NewTCLIServiceProcessor(handler) + + th := thriftHandler{ + processor: processor, + inPfactory: protocolFactory, + outPfactory: protocolFactory, + } + + ts := httptest.NewServer(&th) + + return ts +} diff --git a/vendor/github.com/databricks/databricks-sql-go/tools.go b/vendor/github.com/databricks/databricks-sql-go/tools.go new file mode 100644 index 00000000..bcd0f475 --- /dev/null +++ b/vendor/github.com/databricks/databricks-sql-go/tools.go @@ -0,0 +1,7 @@ +//go:build tools + +package tools + +import ( + _ "gotest.tools/gotestsum" +) diff --git a/vendor/github.com/dnephin/pflag/.gitignore b/vendor/github.com/dnephin/pflag/.gitignore new file mode 100644 index 00000000..c3da2901 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/.gitignore @@ -0,0 +1,2 @@ +.idea/* + diff --git a/vendor/github.com/dnephin/pflag/.travis.yml b/vendor/github.com/dnephin/pflag/.travis.yml new file mode 100644 index 00000000..00d04cb9 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/.travis.yml @@ -0,0 +1,22 @@ +sudo: false + +language: go + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get golang.org/x/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... + +script: + - verify/all.sh -v + - go test ./... 
diff --git a/vendor/github.com/dnephin/pflag/LICENSE b/vendor/github.com/dnephin/pflag/LICENSE new file mode 100644 index 00000000..63ed1cfe --- /dev/null +++ b/vendor/github.com/dnephin/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dnephin/pflag/README.md b/vendor/github.com/dnephin/pflag/README.md new file mode 100644 index 00000000..971475fb --- /dev/null +++ b/vendor/github.com/dnephin/pflag/README.md @@ -0,0 +1 @@ +This is a fork of `spf13/pflag`. See [gotestsum/issues/176](https://github.com/gotestyourself/gotestsum/issues/176#issuecomment-780854592). diff --git a/vendor/github.com/dnephin/pflag/bool.go b/vendor/github.com/dnephin/pflag/bool.go new file mode 100644 index 00000000..c4c5c0bf --- /dev/null +++ b/vendor/github.com/dnephin/pflag/bool.go @@ -0,0 +1,94 @@ +package pflag + +import "strconv" + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +func boolConv(sval string) (interface{}, error) { + return strconv.ParseBool(sval) +} + +// GetBool return the bool value of a flag with the given name +func (f *FlagSet) GetBool(name string) (bool, error) { + val, err := f.getFlagType(name, "bool", boolConv) + if err != nil { + return false, err + } + return val.(bool), nil +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. 
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + return f.BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func BoolP(name, shorthand string, value bool, usage string) *bool { + b := CommandLine.BoolP(name, shorthand, value, usage) + return b +} diff --git a/vendor/github.com/dnephin/pflag/bool_slice.go b/vendor/github.com/dnephin/pflag/bool_slice.go new file mode 100644 index 00000000..3731370d --- /dev/null +++ b/vendor/github.com/dnephin/pflag/bool_slice.go @@ -0,0 +1,185 @@ +package pflag + +import ( + "io" + "strconv" + "strings" +) + +// -- boolSlice Value +type boolSliceValue struct { + value *[]bool + changed bool +} + +func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { + bsv := new(boolSliceValue) + bsv.value = p + *bsv.value = val + return bsv +} + +// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. +// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. +func (s *boolSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse boolean values into slice + out := make([]bool, 0, len(boolStrSlice)) + for _, boolStr := range boolStrSlice { + b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) + if err != nil { + return err + } + out = append(out, b) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. 
+func (s *boolSliceValue) Type() string { + return "boolSlice" +} + +// String defines a "native" format for this boolean slice flag value. +func (s *boolSliceValue) String() string { + + boolStrSlice := make([]string, len(*s.value)) + for i, b := range *s.value { + boolStrSlice[i] = strconv.FormatBool(b) + } + + out, _ := writeAsCSV(boolStrSlice) + + return "[" + out + "]" +} + +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func boolSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []bool{}, nil + } + ss := strings.Split(val, ",") + out := make([]bool, len(ss)) + for i, t := range ss { + var err error + out[i], err = strconv.ParseBool(t) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetBoolSlice returns the []bool value of a flag with the given name. +func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { + val, err := f.getFlagType(name, "boolSlice", boolSliceConv) + if err != nil { + return []bool{}, err + } + return val.([]bool), nil +} + +// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. +// The argument p points to a []bool variable in which to store the value of the flag. +func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) +} + +// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. +func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { + CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, "", value, usage) + return &p +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + p := []bool{} + f.BoolSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// BoolSlice defines a []bool flag with specified name, default value, and usage string. +// The return value is the address of a []bool variable that stores the value of the flag. +func BoolSlice(name string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, "", value, usage) +} + +// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. +func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { + return CommandLine.BoolSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/bytes.go b/vendor/github.com/dnephin/pflag/bytes.go new file mode 100644 index 00000000..67d53045 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/bytes.go @@ -0,0 +1,209 @@ +package pflag + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + "strings" +) + +// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded +type bytesHexValue []byte + +// String implements pflag.Value.String. +func (bytesHex bytesHexValue) String() string { + return fmt.Sprintf("%X", []byte(bytesHex)) +} + +// Set implements pflag.Value.Set. +func (bytesHex *bytesHexValue) Set(value string) error { + bin, err := hex.DecodeString(strings.TrimSpace(value)) + + if err != nil { + return err + } + + *bytesHex = bin + + return nil +} + +// Type implements pflag.Value.Type. +func (*bytesHexValue) Type() string { + return "bytesHex" +} + +func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { + *p = val + return (*bytesHexValue)(p) +} + +func bytesHexConv(sval string) (interface{}, error) { + + bin, err := hex.DecodeString(sval) + + if err == nil { + return bin, nil + } + + return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) +} + +// GetBytesHex return the []byte value of a flag with the given name +func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { + val, err := f.getFlagType(name, "bytesHex", bytesHexConv) + + if err != nil { + return []byte{}, err + } + + return val.([]byte), nil +} + +// BytesHexVar defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { + f.VarP(newBytesHexValue(value, p), name, "", usage) +} + +// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { + f.VarP(newBytesHexValue(value, p), name, shorthand, usage) +} + +// BytesHexVar defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func BytesHexVar(p *[]byte, name string, value []byte, usage string) { + CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) +} + +// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. +func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { + CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) +} + +// BytesHex defines an []byte flag with specified name, default value, and usage string. 
+// The return value is the address of an []byte variable that stores the value of the flag. +func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesHexVarP(p, name, "", value, usage) + return p +} + +// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesHexVarP(p, name, shorthand, value, usage) + return p +} + +// BytesHex defines an []byte flag with specified name, default value, and usage string. +// The return value is the address of an []byte variable that stores the value of the flag. +func BytesHex(name string, value []byte, usage string) *[]byte { + return CommandLine.BytesHexP(name, "", value, usage) +} + +// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. +func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { + return CommandLine.BytesHexP(name, shorthand, value, usage) +} + +// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded +type bytesBase64Value []byte + +// String implements pflag.Value.String. +func (bytesBase64 bytesBase64Value) String() string { + return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) +} + +// Set implements pflag.Value.Set. +func (bytesBase64 *bytesBase64Value) Set(value string) error { + bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) + + if err != nil { + return err + } + + *bytesBase64 = bin + + return nil +} + +// Type implements pflag.Value.Type. +func (*bytesBase64Value) Type() string { + return "bytesBase64" +} + +func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { + *p = val + return (*bytesBase64Value)(p) +} + +func bytesBase64ValueConv(sval string) (interface{}, error) { + + bin, err := base64.StdEncoding.DecodeString(sval) + if err == nil { + return bin, nil + } + + return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) +} + +// GetBytesBase64 return the []byte value of a flag with the given name +func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { + val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) + + if err != nil { + return []byte{}, err + } + + return val.([]byte), nil +} + +// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { + f.VarP(newBytesBase64Value(value, p), name, "", usage) +} + +// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { + f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) +} + +// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { + CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) +} + +// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. 
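+//
+// A minimal decoding sketch (illustrative names; input must be valid
+// encoding/base64 StdEncoding text):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	key := fs.BytesBase64("key", nil, "base64-encoded key")
+//	_ = fs.Parse([]string{"--key=aGVsbG8="})
+//	// *key is now []byte("hello")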
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/count.go b/vendor/github.com/dnephin/pflag/count.go
new file mode 100644
index 00000000..a0b2679f
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	// "+1" means that no specific value was passed, so increment
+	if s == "+1" {
+		*i = countValue(*i + 1)
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 0, 0)
+	*i = countValue(v)
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
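+//
+// A minimal usage sketch (flag names are illustrative). Because a count flag
+// sets NoOptDefVal to "+1", every bare occurrence increments the value, so
+// combined shorthands work as a verbosity level:
+//
+//	var verbosity int
+//	CountVarP(&verbosity, "verbose", "v", "increase verbosity")
+//	// after parsing "-vvv", verbosity == 3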
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/duration.go b/vendor/github.com/dnephin/pflag/duration.go
new file mode 100644
index 00000000..e9debef8
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
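+//
+// A minimal usage sketch (illustrative names; any input accepted by
+// time.ParseDuration is valid):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	timeout := fs.Duration("timeout", 5*time.Second, "request timeout")
+//	_ = fs.Parse([]string{"--timeout=250ms"})
+//	// *timeout is now 250 * time.Millisecond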
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/duration_slice.go b/vendor/github.com/dnephin/pflag/duration_slice.go new file mode 100644 index 00000000..badadda5 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/duration_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// -- durationSlice Value +type durationSliceValue struct { + value *[]time.Duration + changed bool +} + +func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { + dsv := new(durationSliceValue) + dsv.value = p + *dsv.value = val + return dsv +} + +func (s *durationSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *durationSliceValue) Type() string { + return "durationSlice" +} + +func (s *durationSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%s", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func durationSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []time.Duration{}, nil + } + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetDurationSlice returns the []time.Duration value of a flag with the given name +func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { + val, err := f.getFlagType(name, "durationSlice", durationSliceConv) + if err != nil { + return []time.Duration{}, err + } + return val.([]time.Duration), nil +} + +// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. +// The argument p points to a []time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. +// The argument p points to a duration[] variable in which to store the value of the flag. +func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. +func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. 
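+//
+// A minimal usage sketch (illustrative names); values are comma-separated
+// time.ParseDuration inputs, and repeating the flag appends to the slice:
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	steps := fs.DurationSlice("steps", []time.Duration{}, "backoff steps")
+//	_ = fs.Parse([]string{"--steps=1s,2s", "--steps=4s"})
+//	// *steps is now [1s 2s 4s]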
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, "", value, usage) + return &p +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. +func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, "", value, usage) +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/flag.go b/vendor/github.com/dnephin/pflag/flag.go new file mode 100644 index 00000000..e57abb4f --- /dev/null +++ b/vendor/github.com/dnephin/pflag/flag.go @@ -0,0 +1,1242 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +Usage: + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + + import flag "github.com/spf13/pflag" + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. + +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. 
+ +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. You can use these by appending +'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") + var flagvar bool + func init() { + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") + } + flag.VarP(&flagval, "varname", "v", "help message") +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. + +Command line flag syntax: + --flag // boolean flags only + --flag=x + +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags + -f + -abc + // non-boolean flags + -n 1234 + -Ifile + // mixed + -abcs "hello" + -abcn1234 + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. +*/ +package pflag + +import ( + "bytes" + "errors" + goflag "flag" + "fmt" + "io" + "os" + "sort" + "strings" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("pflag: help requested") + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + // ContinueOnError will return an err from Parse() if an error is found + ContinueOnError ErrorHandling = iota + // ExitOnError will call os.Exit(2) if an error is found when parsing + ExitOnError + // PanicOnError will panic() if an error is found when parsing flags + PanicOnError +) + +// ParseErrorsWhitelist defines the parsing errors that can be ignored +type ParseErrorsWhitelist struct { + // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags + UnknownFlags bool +} + +// NormalizedName is a flag name that has been normalized according to rules +// for the FlagSet (e.g. making '-' and '_' equivalent). +type NormalizedName string + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + // SortFlags is used to indicate, if user wants to have sorted flags in + // help/usage messages. 
+	SortFlags bool
+
+	// ParseErrorsWhitelist is used to configure a whitelist of errors
+	ParseErrorsWhitelist ParseErrorsWhitelist
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use Output() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+	addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string              // name as it appears on command line
+	Shorthand           string              // one-letter abbreviated flag
+	Usage               string              // help message
+	Value               Value               // value as set
+	DefValue            string              // default value (as text); for usage message
+	Changed             bool                // If the user set the value (or if left to default)
+	NoOptDefVal         string              // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string is the deprecation message, typically naming what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the deprecation message, typically naming what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+	// Append adds the specified value to the end of the flag value list.
+	Append(string) error
+	// Replace will fully overwrite any data currently in the flag value list.
+	Replace([]string) error
+	// GetSlice returns the flag value list as an array of strings.
+	GetSlice() []string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and anything that later tries
+// to look up a flag will have its name translated the same way. This makes it
+// possible to create a flag named "getURL" and have it translated to "geturl";
+// a user could then pass "--getUrl", which is also translated to "geturl", and
+// everything will work.
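+//
+// A minimal sketch, assuming only the standard library strings package:
+// treat '_' and '-' as equivalent in flag names.
+//
+//	fs.SetNormalizeFunc(func(f *FlagSet, name string) NormalizedName {
+//		return NormalizedName(strings.Replace(name, "_", "-", -1))
+//	})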
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { + f.normalizeNameFunc = n + f.sortedFormal = f.sortedFormal[:0] + for fname, flag := range f.formal { + nname := f.normalizeFlagName(flag.Name) + if fname == nname { + continue + } + flag.Name = string(nname) + delete(f.formal, fname) + f.formal[nname] = flag + if _, set := f.actual[fname]; set { + delete(f.actual, fname) + f.actual[nname] = flag + } + } +} + +// GetNormalizeFunc returns the previously set NormalizeFunc of a function which +// does no translation, if not set previously. +func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +// Output returns the destination for usage and error messages. os.Stderr is returned if +// output was not set or was set to nil. +func (f *FlagSet) Output() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// Name returns the name of the flag set. +func (f *FlagSet) Name() string { + return f.name +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + if len(f.formal) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.formal) != len(f.sortedFormal) { + f.sortedFormal = sortFlags(f.formal) + } + flags = f.sortedFormal + } else { + flags = f.orderedFormal + } + + for _, flag := range flags { + fn(flag) + } +} + +// HasFlags returns a bool to indicate if the FlagSet has any flags defined. +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags +// that are not hidden. +func (f *FlagSet) HasAvailableFlags() bool { + for _, flag := range f.formal { + if !flag.Hidden { + return true + } + } + return false +} + +// VisitAll visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + if len(f.actual) == 0 { + return + } + + var flags []*Flag + if f.SortFlags { + if len(f.actual) != len(f.sortedActual) { + f.sortedActual = sortFlags(f.actual) + } + flags = f.sortedActual + } else { + flags = f.orderedActual + } + + for _, flag := range flags { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order or +// in primordial order if f.SortFlags is false, calling fn for each. +// It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. 
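+//
+// A minimal sketch (illustrative flag name):
+//
+//	if fl := fs.Lookup("timeout"); fl != nil {
+//		fmt.Println(fl.Value.Type(), fl.DefValue)
+//	}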
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprintf(f.Output(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// getFlagType looks up the named flag and converts its value to the given type
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	flag.Hidden = true
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
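+//
+// A minimal programmatic-use sketch; the "output" string flag is illustrative
+// (String is defined in this package's string flag file, not shown here):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	out := fs.String("output", "-", "output path")
+//	_ = fs.Set("output", "report.txt")
+//	// *out == "report.txt" and fs.Changed("output") reports true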
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+
+	err := flag.Value.Set(value)
+	if err != nil {
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+	}
+
+	if !flag.Changed {
+		if f.actual == nil {
+			f.actual = make(map[NormalizedName]*Flag)
+		}
+		f.actual[normalName] = flag
+		f.orderedActual = append(f.orderedActual, flag)
+
+		flag.Changed = true
+	}
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed....
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprint(f.Output(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == ""
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "<nil>":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + + name = flag.Value.Type() + switch name { + case "bool": + name = "" + case "float64": + name = "float" + case "int64": + name = "int" + case "uint64": + name = "uint" + case "stringSlice": + name = "strings" + case "intSlice": + name = "ints" + case "uintSlice": + name = "uints" + case "boolSlice": + name = "bools" + } + + return +} + +// Splits the string `s` on whitespace into an initial substring up to +// `i` runes in length and the remainder. Will go `slop` over `i` if +// that encompasses the entire string (which allows the caller to +// avoid short orphan words on the final line). +func wrapN(i, slop int, s string) (string, string) { + if i+slop > len(s) { + return s, "" + } + + w := strings.LastIndexAny(s[:i], " \t\n") + if w <= 0 { + return s, "" + } + nlPos := strings.LastIndex(s[:i], "\n") + if nlPos > 0 && nlPos < w { + return s[:nlPos], s[nlPos+1:] + } + return s[:w], s[w+1:] +} + +// Wraps the string `s` to a maximum width `w` with leading indent +// `i`. The first line is not indented (this is assumed to be done by +// caller). Pass `w` == 0 to do no wrapping +func wrap(i, w int, s string) string { + if w == 0 { + return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1) + } + + // space between indent i and end of line width w into which + // we should wrap the text. + wrap := w - i + + var r, l string + + // Not enough space for sensible wrapping. Wrap as a block on + // the next line instead. + if wrap < 24 { + i = 16 + wrap = w - i + r += "\n" + strings.Repeat(" ", i) + } + // If still not enough space then don't even try to wrap. + if wrap < 24 { + return strings.Replace(s, "\n", r, -1) + } + + // Try to avoid short orphan words on the final line, by + // allowing wrapN to go a bit over if that would fit in the + // remainder of the line. + slop := 5 + wrap = wrap - slop + + // Handle first line, which is indented by the caller (or the + // special case above) + l, s = wrapN(wrap, slop, s) + r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) + + // Now wrap the rest + for s != "" { + var t string + + t, s = wrapN(wrap, slop, s) + r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) + } + + return r + +} + +// FlagUsagesWrapped returns a string containing the usage information +// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no +// wrapping) +func (f *FlagSet) FlagUsagesWrapped(cols int) string { + buf := new(bytes.Buffer) + + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 + f.VisitAll(func(flag *Flag) { + if flag.Hidden { + return + } + + line := "" + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) + } else { + line = fmt.Sprintf(" --%s", flag.Name) + } + + varname, usage := UnquoteUsage(flag) + if varname != "" { + line += " " + varname + } + if flag.NoOptDefVal != "" { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !flag.defaultIsZeroValue() { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + if len(flag.Deprecated) != 0 { + line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) + } + + lines = append(lines, line) + }) + + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx + fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) + } + + return buf.String() +} + +// FlagUsages returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + return f.FlagUsagesWrapped(0) +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. 
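+//
+// A minimal sketch: with interspersed parsing (the default), non-flag
+// arguments are collected in order (BoolP is illustrative, defined in this
+// package's bool flag file):
+//
+//	fs := NewFlagSet("demo", ContinueOnError)
+//	verbose := fs.BoolP("verbose", "v", false, "verbose output")
+//	_ = fs.Parse([]string{"in.txt", "-v", "out.txt"})
+//	// *verbose == true, fs.NArg() == 2, fs.Arg(0) == "in.txt"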
+func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// VarPF is like VarP, but returns the flag created +func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) + return flag +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + f.VarPF(value, name, shorthand, usage) +} + +// AddFlag will add the flag to the FlagSet +func (f *FlagSet) AddFlag(flag *Flag) { + normalizedFlagName := f.normalizeFlagName(flag.Name) + + _, alreadyThere := f.formal[normalizedFlagName] + if alreadyThere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.Output(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + f.orderedFormal = append(f.orderedFormal, flag) + + if flag.Shorthand == "" { + return + } + if len(flag.Shorthand) > 1 { + msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) + fmt.Fprintf(f.Output(), msg) + panic(msg) + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + used, alreadyThere := f.shorthands[c] + if alreadyThere { + msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) + fmt.Fprintf(f.Output(), msg) + panic(msg) + } + f.shorthands[c] = flag +} + +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored. +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
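+//
+// A minimal custom Value sketch matching the comma-separated example above
+// (the csvValue type is hypothetical):
+//
+//	type csvValue []string
+//
+//	func (c *csvValue) String() string     { return strings.Join(*c, ",") }
+//	func (c *csvValue) Set(s string) error { *c = strings.Split(s, ","); return nil }
+//	func (c *csvValue) Type() string       { return "csv" }
+//
+//	var fields csvValue
+//	Var(&fields, "fields", "comma-separated field names")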
+func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + if f.errorHandling != ContinueOnError { + fmt.Fprintln(f.Output(), err) + f.usage() + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +//--unknown (args will be empty) +//--unknown --next-flag ... (args will be --next-flag ...) +//--unknown arg ... (args will be arg ...) +func stripUnknownFlagValue(args []string) []string { + if len(args) == 0 { + //--unknown + return args + } + + first := args[0] + if len(first) > 0 && first[0] == '-' { + //--unknown --next-flag ... + return args + } + + //--unknown arg ... (args will be arg ...) + if len(args) > 1 { + return args[1:] + } + return nil +} + +func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, exists := f.formal[f.normalizeFlagName(name)] + + if !exists { + switch { + case name == "help": + f.usage() + return a, ErrHelp + case f.ParseErrorsWhitelist.UnknownFlags: + // --unknown=unknownval arg ... + // we do not want to lose arg in this case + if len(split) >= 2 { + return a, nil + } + + return stripUnknownFlagValue(a), nil + default: + err = f.failf("unknown flag: --%s", name) + return + } + } + + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if flag.NoOptDefVal != "" { + // '--flag' (arg was optional) + value = flag.NoOptDefVal + } else if len(a) > 0 { + // '--flag arg' + value = a[0] + a = a[1:] + } else { + // '--flag' (arg was required) + err = f.failf("flag needs an argument: %s", s) + return + } + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { + outArgs = args + + outShorts = shorthands[1:] + c := shorthands[0] + + flag, exists := f.shorthands[c] + if !exists { + switch { + case c == 'h': + f.usage() + err = ErrHelp + return + case f.ParseErrorsWhitelist.UnknownFlags: + // '-f=arg arg ...' 
+ // we do not want to lose arg in this case + if len(shorthands) > 2 && shorthands[1] == '=' { + outShorts = "" + return + } + + outArgs = stripUnknownFlagValue(outArgs) + return + default: + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + } + + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + // '-f=arg' + value = shorthands[2:] + outShorts = "" + } else if flag.NoOptDefVal != "" { + // '-f' (arg was optional) + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + // '-farg' + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + // '-f arg' + value = args[0] + outArgs = args[1:] + } else { + // '-f' (arg was required) + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + + if flag.ShorthandDeprecated != "" { + fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + + err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } + return +} + +func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { + a = args + shorthands := s[1:] + + // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args, fn) + } else { + args, err = f.parseShortArg(s, args, fn) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + if f.addedGoFlagSets != nil { + for _, goFlagSet := range f.addedGoFlagSets { + goFlagSet.Parse(nil) + } + } + f.parsed = true + + if len(arguments) < 0 { + return nil + } + + f.args = make([]string, 0, len(arguments)) + + set := func(flag *Flag, value string) error { + return f.Set(flag.Name, value) + } + + err := f.parseArgs(arguments, set) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + fmt.Println(err) + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +type parseFunc func(flag *Flag, value string) error + +// ParseAll parses flag definitions from the argument list, which should not +// include the command name. The arguments for fn are flag and value. Must be +// called after all flags in the FlagSet are defined and before flags are +// accessed by the program. The return value will be ErrHelp if -help was set +// but not defined. 
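+//
+// A minimal sketch: record each flag as it is parsed while still applying it
+// (fs and the bookkeeping are illustrative):
+//
+//	seen := []string{}
+//	err := fs.ParseAll(os.Args[1:], func(flag *Flag, value string) error {
+//		seen = append(seen, flag.Name)
+//		return fs.Set(flag.Name, value)
+//	})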
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+	f.parsed = true
+	f.args = make([]string, 0, len(arguments))
+
+	err := f.parseArgs(arguments, fn)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+	CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+		argsLenAtDash: -1,
+		interspersed:  true,
+		SortFlags:     true,
+	}
+	return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+	f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling + f.argsLenAtDash = -1 +} diff --git a/vendor/github.com/dnephin/pflag/float32.go b/vendor/github.com/dnephin/pflag/float32.go new file mode 100644 index 00000000..a243f81f --- /dev/null +++ b/vendor/github.com/dnephin/pflag/float32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
+func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/float32_slice.go b/vendor/github.com/dnephin/pflag/float32_slice.go new file mode 100644 index 00000000..caa35274 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/float64.go b/vendor/github.com/dnephin/pflag/float64.go new file mode 100644 index 00000000..04b5492a --- /dev/null +++ b/vendor/github.com/dnephin/pflag/float64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. 
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/float64_slice.go b/vendor/github.com/dnephin/pflag/float64_slice.go new file mode 100644 index 00000000..85bf3073 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
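One behaviour worth noting from the Set implementation above: the first occurrence of a slice flag on the command line replaces the default, and every later occurrence appends. An illustrative sketch:

package main

import (
	"fmt"

	flag "github.com/dnephin/pflag"
)

func main() {
	weights := flag.Float64Slice("weights", []float64{1.0}, "model weights")
	flag.Parse()
	// Given "--weights=0.1,0.2 --weights=0.3" this prints [0.1 0.2 0.3]:
	// the first occurrence discards the default, the second appends.
	fmt.Println(*weights)
}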
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+	p := []float64{}
+	f.Float64SliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+	p := []float64{}
+	f.Float64SliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+	return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+	return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/golangflag.go b/vendor/github.com/dnephin/pflag/golangflag.go
new file mode 100644
index 00000000..d3dd72b7
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	goflag "flag"
+	"reflect"
+	"strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+	inner    goflag.Value
+	flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+	goflag.Value
+	IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+	// If the flag.Value happens to also be a pflag.Value, just use it directly.
+	if pv, ok := v.(Value); ok {
+		return pv
+	}
+
+	pv := &flagValueWrapper{
+		inner: v,
+	}
+
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+	return pv
+}
+
+func (v *flagValueWrapper) String() string {
+	return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+	return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+	return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+	// Remember the default value as a string; it won't change.
+ flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) + if f.addedGoFlagSets == nil { + f.addedGoFlagSets = make([]*goflag.FlagSet, 0) + } + f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) +} diff --git a/vendor/github.com/dnephin/pflag/int.go b/vendor/github.com/dnephin/pflag/int.go new file mode 100644 index 00000000..1474b89d --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } + +func intConv(sval string) (interface{}, error) { + return strconv.Atoi(sval) +} + +// GetInt return the int value of a flag with the given name +func (f *FlagSet) GetInt(name string) (int, error) { + val, err := f.getFlagType(name, "int", intConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
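A sketch of the intended use of AddGoFlagSet above: adopting flags that were registered through the standard library, for example by an imported package, so they parse alongside pflag flags. The -v flag is invented for illustration:

package main

import (
	goflag "flag"
	"fmt"

	flag "github.com/dnephin/pflag"
)

func main() {
	// A flag registered through the standard library, e.g. by a dependency.
	v := goflag.Int("v", 0, "log verbosity")

	// Wrap every goflag into a pflag.Flag and parse them all together.
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	flag.Parse()

	fmt.Println("verbosity:", *v)
}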
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int16.go b/vendor/github.com/dnephin/pflag/int16.go new file mode 100644 index 00000000..f1a01d05 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int16 Value +type int16Value int16 + +func newInt16Value(val int16, p *int16) *int16Value { + *p = val + return (*int16Value)(p) +} + +func (i *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + *i = int16Value(v) + return err +} + +func (i *int16Value) Type() string { + return "int16" +} + +func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 16) + if err != nil { + return 0, err + } + return int16(v), nil +} + +// GetInt16 returns the int16 value of a flag with the given name +func (f *FlagSet) GetInt16(name string) (int16, error) { + val, err := f.getFlagType(name, "int16", int16Conv) + if err != nil { + return 0, err + } + return val.(int16), nil +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func Int16Var(p *int16, name string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, "", value, usage) + return p +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, shorthand, value, usage) + return p +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. 
+// The return value is the address of an int16 variable that stores the value of the flag. +func Int16(name string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, "", value, usage) +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func Int16P(name, shorthand string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int32.go b/vendor/github.com/dnephin/pflag/int32.go new file mode 100644 index 00000000..9b95944f --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. 
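A behavioural detail of the integer Set methods above: they call strconv.ParseInt with base 0, so 0x-prefixed hex and 0-prefixed octal literals are accepted on the command line. A sketch with an invented flag:

package main

import (
	"fmt"

	flag "github.com/dnephin/pflag"
)

func main() {
	mask := flag.Int("mask", 0, "permission mask")
	flag.Parse()
	// --mask=0x1ff and --mask=511 store the same value, because Set
	// parses its argument with strconv.ParseInt and base 0.
	fmt.Printf("%#o\n", *mask)
}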
+func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int32_slice.go b/vendor/github.com/dnephin/pflag/int32_slice.go new file mode 100644 index 00000000..ff128ff0 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. 
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int64.go b/vendor/github.com/dnephin/pflag/int64.go new file mode 100644 index 00000000..0026d781 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. 
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int64_slice.go b/vendor/github.com/dnephin/pflag/int64_slice.go new file mode 100644 index 00000000..25464638 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
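The Append, Replace and GetSlice methods above implement what upstream pflag exposes as the SliceValue interface, which permits programmatic mutation after parsing; a sketch, assuming that interface is declared elsewhere in this package as it is upstream:

package main

import (
	"fmt"

	flag "github.com/dnephin/pflag"
)

func main() {
	ids := flag.Int64Slice("ids", nil, "object ids")
	flag.Parse()

	// Mutate the parsed slice through the flag's Value after the fact.
	if sv, ok := flag.CommandLine.Lookup("ids").Value.(flag.SliceValue); ok {
		_ = sv.Append("42")
	}
	fmt.Println(*ids)
}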
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int8.go b/vendor/github.com/dnephin/pflag/int8.go new file mode 100644 index 00000000..4da92228 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/int_slice.go b/vendor/github.com/dnephin/pflag/int_slice.go new file mode 100644 index 00000000..e71c39d9 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/int_slice.go @@ -0,0 +1,158 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/ip.go b/vendor/github.com/dnephin/pflag/ip.go new file mode 100644 index 00000000..3d414ba6 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/ip.go @@ -0,0 +1,94 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. 
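For completeness, the ip value above in use; a malformed address surfaces as the Set error at parse time. Flag name and default are illustrative:

package main

import (
	"fmt"
	"net"

	flag "github.com/dnephin/pflag"
)

func main() {
	bind := flag.IP("bind", net.ParseIP("127.0.0.1"), "address to bind")
	flag.Parse()
	fmt.Println("binding to", *bind)
}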
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/ip_slice.go b/vendor/github.com/dnephin/pflag/ip_slice.go new file mode 100644 index 00000000..775faae4 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/ip_slice.go @@ -0,0 +1,186 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipSlice Value +type ipSliceValue struct { + value *[]net.IP + changed bool +} + +func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { + ipsv := new(ipSliceValue) + ipsv.value = p + *ipsv.value = val + return ipsv +} + +// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. +// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. +func (s *ipSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IP, 0, len(ipStrSlice)) + for _, ipStr := range ipStrSlice { + ip := net.ParseIP(strings.TrimSpace(ipStr)) + if ip == nil { + return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) + } + out = append(out, ip) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipSliceValue) Type() string { + return "ipSlice" +} + +// String defines a "native" format for this net.IP slice flag value. 
+func (s *ipSliceValue) String() string { + + ipStrSlice := make([]string, len(*s.value)) + for i, ip := range *s.value { + ipStrSlice[i] = ip.String() + } + + out, _ := writeAsCSV(ipStrSlice) + + return "[" + out + "]" +} + +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func ipSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []net.IP{}, nil + } + ss := strings.Split(val, ",") + out := make([]net.IP, len(ss)) + for i, sval := range ss { + ip := net.ParseIP(strings.TrimSpace(sval)) + if ip == nil { + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) + } + out[i] = ip + } + return out, nil +} + +// GetIPSlice returns the []net.IP value of a flag with the given name +func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { + val, err := f.getFlagType(name, "ipSlice", ipSliceConv) + if err != nil { + return []net.IP{}, err + } + return val.([]net.IP), nil +} + +// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + f.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. +// The argument p points to a []net.IP variable in which to store the value of the flag. +func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) +} + +// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { + CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of that flag. +func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, "", value, usage) + return &p +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
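Because Set above strips quote characters and reads its argument with a CSV parser, an IP-slice flag accepts both comma-separated lists and repeated flags; an illustrative sketch:

package main

import (
	"fmt"
	"net"

	flag "github.com/dnephin/pflag"
)

func main() {
	peers := flag.IPSlice("peers", []net.IP{}, "peer addresses")
	flag.Parse()
	// Equivalent spellings, which may also be mixed:
	//   --peers=10.0.0.1,10.0.0.2
	//   --peers=10.0.0.1 --peers=10.0.0.2
	fmt.Println(*peers)
}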
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + p := []net.IP{} + f.IPSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IPSlice defines a []net.IP flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of the flag. +func IPSlice(name string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, "", value, usage) +} + +// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. +func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { + return CommandLine.IPSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/ipmask.go b/vendor/github.com/dnephin/pflag/ipmask.go new file mode 100644 index 00000000..5bd44bd2 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. +func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. 
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/ipnet.go b/vendor/github.com/dnephin/pflag/ipnet.go
new file mode 100644
index 00000000..e2c1b8bc
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+	n := net.IPNet(ipnet)
+	return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(value))
+	if err != nil {
+		return err
+	}
+	*ipnet = ipNetValue(*n)
+	return nil
+}
+
+func (*ipNetValue) Type() string {
+	return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+	*p = val
+	return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+	if err == nil {
+		return *n, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet return the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNet", ipNetConv)
+	if err != nil {
+		return net.IPNet{}, err
+	}
+	return val.(net.IPNet), nil
+}
+
+// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to an net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
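+//
+// Usage sketch (the flag name and Parse call here are illustrative, assuming
+// the upstream pflag-style FlagSet API this fork mirrors):
+//
+//	var n net.IPNet
+//	fs.IPNetVarP(&n, "subnet", "s", net.IPNet{}, "allowed subnet")
+//	_ = fs.Parse([]string{"--subnet=10.0.0.0/8"})
+//	// n.String() == "10.0.0.0/8"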
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/string.go b/vendor/github.com/dnephin/pflag/string.go new file mode 100644 index 00000000..04e0a26f --- /dev/null +++ b/vendor/github.com/dnephin/pflag/string.go @@ -0,0 +1,80 @@ +package pflag + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return string(*s) } + +func stringConv(sval string) (interface{}, error) { + return sval, nil +} + +// GetString return the string value of a flag with the given name +func (f *FlagSet) GetString(name string) (string, error) { + val, err := f.getFlagType(name, "string", stringConv) + if err != nil { + return "", err + } + return val.(string), nil +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. 
+// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/string_array.go b/vendor/github.com/dnephin/pflag/string_array.go new file mode 100644 index 00000000..4894af81 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/string_array.go @@ -0,0 +1,129 @@ +package pflag + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with 
specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, "", value, usage) + return &p +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. +func StringArray(name string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, "", value, usage) +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
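+//
+// For example (illustrative): repeated flags accumulate, and commas inside a
+// value are kept verbatim rather than split:
+//
+//	--name="a,b" --name="c"   =>   []string{"a,b", "c"}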
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/string_slice.go b/vendor/github.com/dnephin/pflag/string_slice.go new file mode 100644 index 00000000..3cb2e69d --- /dev/null +++ b/vendor/github.com/dnephin/pflag/string_slice.go @@ -0,0 +1,163 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "strings" +) + +// -- stringSlice Value +type stringSliceValue struct { + value *[]string + changed bool +} + +func newStringSliceValue(val []string, p *[]string) *stringSliceValue { + ssv := new(stringSliceValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} + +func (s *stringSliceValue) Set(val string) error { + v, err := readAsCSV(val) + if err != nil { + return err + } + if !s.changed { + *s.value = v + } else { + *s.value = append(*s.value, v...) + } + s.changed = true + return nil +} + +func (s *stringSliceValue) Type() string { + return "stringSlice" +} + +func (s *stringSliceValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + +func stringSliceConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a slice with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringSlice return the []string value of a flag with the given name +func (f *FlagSet) GetStringSlice(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringSlice", stringSliceConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" --ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
+// For example: +// --ss="v1,v2" --ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func StringSliceVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" --ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, "", value, usage) + return &p +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" --ss="v3" +// will result in +// []string{"v1", "v2", "v3"} +func StringSlice(name string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, "", value, usage) +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/string_to_int.go b/vendor/github.com/dnephin/pflag/string_to_int.go new file mode 100644 index 00000000..5ceda396 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/string_to_int.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt Value +type stringToIntValue struct { + value *map[string]int + changed bool +} + +func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { + ssv := new(stringToIntValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToIntValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToIntValue) Type() string { + return "stringToInt" +} + +func (s *stringToIntValue) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.Itoa(v)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToIntConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt return the map[string]int value of a flag with the given name +func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { + val, err := f.getFlagType(name, "stringToInt", stringToIntConv) + if err != nil { + return map[string]int{}, err + } + return val.(map[string]int), nil +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, "", value, usage) + return &p +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt(name string, value map[string]int, usage string) *map[string]int { + return CommandLine.StringToIntP(name, "", value, usage) +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
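+//
+// For example (illustrative): the value is a comma-separated list of
+// key=value pairs, and repeated flags merge into a single map:
+//
+//	--limits=cpu=2,mem=4 --limits=disk=8   =>   map[string]int{"cpu": 2, "mem": 4, "disk": 8}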
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+	return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/string_to_int64.go b/vendor/github.com/dnephin/pflag/string_to_int64.go
new file mode 100644
index 00000000..a807a04a
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+	value   *map[string]int64
+	changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+	ssv := new(stringToInt64Value)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make(map[string]int64, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		for k, v := range out {
+			(*s.value)[k] = v
+		}
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+	return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+	var buf bytes.Buffer
+	i := 0
+	for k, v := range *s.value {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteString(k)
+		buf.WriteRune('=')
+		buf.WriteString(strconv.FormatInt(v, 10))
+		i++
+	}
+	return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause an empty map
+	if len(val) == 0 {
+		return map[string]int64{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make(map[string]int64, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetStringToInt64 return the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+	val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+	if err != nil {
+		return map[string]int64{}, err
+	}
+	return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+	f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+	f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/string_to_string.go b/vendor/github.com/dnephin/pflag/string_to_string.go new file mode 100644 index 00000000..890a01af --- /dev/null +++ b/vendor/github.com/dnephin/pflag/string_to_string.go @@ -0,0 +1,160 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "fmt" + "strings" +) + +// -- stringToString Value +type stringToStringValue struct { + value *map[string]string + changed bool +} + +func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { + ssv := new(stringToStringValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToStringValue) Set(val string) error { + var ss []string + n := strings.Count(val, "=") + switch n { + case 0: + return fmt.Errorf("%s must be formatted as key=value", val) + case 1: + ss = append(ss, strings.Trim(val, `"`)) + default: + r := csv.NewReader(strings.NewReader(val)) + var err error + ss, err = r.Read() + if err != nil { + return err + } + } + + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToStringValue) Type() string { + return "stringToString" +} + +func (s *stringToStringValue) String() string { + records := make([]string, 0, len(*s.value)>>1) + for k, v := range *s.value { + records = append(records, k+"="+v) + } + + var buf bytes.Buffer + w := csv.NewWriter(&buf) + if err := w.Write(records); err != nil { + panic(err) + } + w.Flush() + return "[" + strings.TrimSpace(buf.String()) + "]" +} + +func stringToStringConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]string{}, nil + } + r := csv.NewReader(strings.NewReader(val)) + ss, err := r.Read() + if err != nil { + return nil, err + } + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + return out, nil +} + +// GetStringToString return the map[string]string value of a flag with the given name +func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { + val, err := f.getFlagType(name, "stringToString", stringToStringConv) + if err != nil { + return map[string]string{}, err + } + return val.(map[string]string), nil +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. +func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, "", value, usage) + return &p +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToString(name string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, "", value, usage) +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
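+//
+// For example (illustrative): with two or more '=' the value is read as CSV,
+// while a single '=' keeps any commas in the value:
+//
+//	--labels=a=1,b=2   =>   map[string]string{"a": "1", "b": "2"}
+//	--labels=a=1,2     =>   map[string]string{"a": "1,2"}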
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/uint.go b/vendor/github.com/dnephin/pflag/uint.go new file mode 100644 index 00000000..dcbc2b75 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/uint.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uintConv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 0) + if err != nil { + return 0, err + } + return uint(v), nil +} + +// GetUint return the uint value of a flag with the given name +func (f *FlagSet) GetUint(name string) (uint, error) { + val, err := f.getFlagType(name, "uint", uintConv) + if err != nil { + return 0, err + } + return val.(uint), nil +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
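+//
+// Note (illustrative): values are parsed with strconv base 0, so plain
+// decimal, 0x-prefixed hex and 0-prefixed octal are all accepted;
+// --count=0x10 stores 16.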
+func UintP(name, shorthand string, value uint, usage string) *uint { + return CommandLine.UintP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/uint16.go b/vendor/github.com/dnephin/pflag/uint16.go new file mode 100644 index 00000000..7e9914ed --- /dev/null +++ b/vendor/github.com/dnephin/pflag/uint16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint16 value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Type() string { + return "uint16" +} + +func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 16) + if err != nil { + return 0, err + } + return uint16(v), nil +} + +// GetUint16 return the uint16 value of a flag with the given name +func (f *FlagSet) GetUint16(name string) (uint16, error) { + val, err := f.getFlagType(name, "uint16", uint16Conv) + if err != nil { + return 0, err + } + return val.(uint16), nil +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func Uint16Var(p *uint16, name string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, "", value, usage) + return p +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, shorthand, value, usage) + return p +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint16(name string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, "", value, usage) +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/uint32.go b/vendor/github.com/dnephin/pflag/uint32.go new file mode 100644 index 00000000..d8024539 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/uint32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint32 value +type uint32Value uint32 + +func newUint32Value(val uint32, p *uint32) *uint32Value { + *p = val + return (*uint32Value)(p) +} + +func (i *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + *i = uint32Value(v) + return err +} + +func (i *uint32Value) Type() string { + return "uint32" +} + +func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 32) + if err != nil { + return 0, err + } + return uint32(v), nil +} + +// GetUint32 return the uint32 value of a flag with the given name +func (f *FlagSet) GetUint32(name string) (uint32, error) { + val, err := f.getFlagType(name, "uint32", uint32Conv) + if err != nil { + return 0, err + } + return val.(uint32), nil +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func Uint32Var(p *uint32, name string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, "", value, usage) + return p +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, shorthand, value, usage) + return p +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func Uint32(name string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, "", value, usage) +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/uint64.go b/vendor/github.com/dnephin/pflag/uint64.go new file mode 100644 index 00000000..f62240f2 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/uint64.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint64Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 64) + if err != nil { + return 0, err + } + return uint64(v), nil +} + +// GetUint64 return the uint64 value of a flag with the given name +func (f *FlagSet) GetUint64(name string) (uint64, error) { + val, err := f.getFlagType(name, "uint64", uint64Conv) + if err != nil { + return 0, err + } + return val.(uint64), nil +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/dnephin/pflag/uint8.go b/vendor/github.com/dnephin/pflag/uint8.go new file mode 100644 index 00000000..bb0e83c1 --- /dev/null +++ b/vendor/github.com/dnephin/pflag/uint8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 8) + if err != nil { + return 0, err + } + return uint8(v), nil +} + +// GetUint8 return the uint8 value of a flag with the given name +func (f *FlagSet) GetUint8(name string) (uint8, error) { + val, err := f.getFlagType(name, "uint8", uint8Conv) + if err != nil { + return 0, err + } + return val.(uint8), nil +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/dnephin/pflag/uint_slice.go b/vendor/github.com/dnephin/pflag/uint_slice.go
new file mode 100644
index 00000000..5fa92483
--- /dev/null
+++ b/vendor/github.com/dnephin/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+	value   *[]uint
+	changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+	uisv := new(uintSliceValue)
+	uisv.value = p
+	*uisv.value = val
+	return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return err
+		}
+		out[i] = uint(u)
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *uintSliceValue) Type() string {
+	return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+	t, err := strconv.ParseUint(val, 10, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+	return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+	i, err := s.fromString(val)
+	if err != nil {
+		return err
+	}
+	*s.value = append(*s.value, i)
+	return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+	out := make([]uint, len(val))
+	for i, d := range val {
+		var err error
+		out[i], err = s.fromString(d)
+		if err != nil {
+			return err
+		}
+	}
+	*s.value = out
+	return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = s.toString(d)
+	}
+	return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []uint{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = uint(u)
+	}
+	return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+	if err != nil {
+		return []uint{}, err
+	}
+	return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
new file mode 100644
index 00000000..25fdaf63
--- /dev/null
+++ b/vendor/github.com/fatih/color/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000..be82827c --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,176 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(color.FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set to a non-empty string. + +`Color` has support to disable/enable colors programmatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`-no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + +## Credits + +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 00000000..889f9e77 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,616 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default, + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. 
+ colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" +} + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorIsSet() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprint(w, c.format()) + return c +} + +// UnsetWriter resets all escape attributes and clears the output with the give +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprint(w, a...) 
+} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.SetWriter(w) + defer c.UnsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). 
+func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise, this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go
new file mode 100644
index 00000000..be01c558
--- /dev/null
+++ b/vendor/github.com/fatih/color/color_windows.go
@@ -0,0 +1,19 @@
+package color
+
+import (
+	"os"
+
+	"golang.org/x/sys/windows"
+)
+
+func init() {
+	// Opt-in for ANSI color support for the current process.
+	// https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
+	var outMode uint32
+	out := windows.Handle(os.Stdout.Fd())
+	if err := windows.GetConsoleMode(out, &outMode); err != nil {
+		return
+	}
+	outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	_ = windows.SetConsoleMode(out, outMode)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000..9491ad54
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,134 @@
+/*
+Package color is an ANSI color package to output colorized or SGR-defined
+output to the standard output. The API can be used in several ways, pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+	color.Cyan("Prints text in cyan.")
+
+	// a newline will be appended automatically
+	color.Blue("Prints %s in blue.", "text")
+
+	// More default foreground colors..
+	color.Red("We have red")
+	color.Yellow("Yellow color too!")
+	color.Magenta("And many others ..")
+
+	// Hi-intensity colors
+	color.HiGreen("Bright green color.")
+	color.HiBlack("Bright black means gray..")
+	color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+	// Create a new color object
+	c := color.New(color.FgCyan).Add(color.Underline)
+	c.Println("Prints cyan text with an underline.")
+
+	// Or just add them to New()
+	d := color.New(color.FgCyan, color.Bold)
+	d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+	// Mix up foreground and background colors, create new mixes!
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := New(FgYellow).SprintFunc()
+	red := New(FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions, users should use fmt.FprintXXX and
+set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will be now in Yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+	if *flagNoColor {
+		color.NoColor = true // disables colorized output
+	}
+
+You can also disable color output by setting the NO_COLOR environment variable to any value.
+
+It also has support for single color definitions (local).
You can
+disable/enable color output on the fly:
+
+	c := color.New(color.FgCyan)
+	c.Println("Prints cyan text")
+
+	c.DisableColor()
+	c.Println("This is printed without any color")
+
+	c.EnableColor()
+	c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 00000000..fad89585
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 00000000..32f1001b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 00000000..4cd0cbaf
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap
new file mode 100644
index 00000000..a04f2907
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.mailmap
@@ -0,0 +1,2 @@
+Chris Howey
+Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 00000000..6cbabe5e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,62 @@
+# Names should be added to this file as
+#    Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Alexey Kazakov
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Brian Goff
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Eric Lin
+Evan Phoenix
+Francisco Souza
+Gautam Dey
+Hari haran
+Ichinose Shogo
+Johannes Ebke
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Matthias Stone
+Nathan Youngman
+Nickolai Zeldovich
+Oliver Bristow
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pratik Shinde
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tobias Klauser
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 00000000..cc01c08f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,357 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+ +## [Unreleased] + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## [1.4.2] - 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## [1.4.1] - 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## [1.4.0] - 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## [1.3.1] - 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## [1.3.0] - 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## [1.2.10] - 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## [1.2.9] - 2016-01-13 + +kqueue: Fix logic for 
CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## [1.2.8] - 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## [1.2.5] - 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## [1.2.1] - 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## [1.2.0] - 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## [1.1.1] - 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## [1.1.0] - 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [1.0.4] - 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## [1.0.3] - 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## [1.0.2] - 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## [1.0.0] - 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. 
+ +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## [0.9.3] - 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## [0.9.2] - 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## [0.9.1] - 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## [0.9.0] - 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## [0.8.12] - 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## [0.8.11] - 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## [0.8.10] - 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## [0.8.9] - 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## [0.8.8] - 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## [0.8.7] - 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## [0.8.6] - 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## [0.8.5] - 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## [0.8.4] - 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## [0.8.3] - 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## [0.8.2] - 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## [0.8.1] - 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## [0.8.0] - 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## [0.7.4] - 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## [0.7.3] - 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## [0.7.2] - 2012-09-01 + +* kqueue: events for created directories + +## [0.7.1] - 2012-07-14 + +* [Fix] for renaming files + +## [0.7.0] - 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## [0.6.0] - 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## [0.5.1] - 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## [0.5.0] - 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## [0.4.0] - 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## [0.3.0] - 2012-02-19 + +* kqueue: add files when watch directory 
+ +## [0.2.0] - 2011-12-30 + +* update to latest Go weekly code + +## [0.1.0] - 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 00000000..8a642563 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000..e180c8fb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000..0731c5ef --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,120 @@ +# File system notifications for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) + +fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. + +Cross platform: Windows, Linux, BSD and macOS. 
+ +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. + +Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). + +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
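+
+To see where you stand against the Linux limit mentioned above, you can read
+`max_user_watches` directly. The sketch below is illustrative only; it is not
+part of fsnotify, and the proc path is Linux-specific:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+func main() {
+	// Linux-only: the kernel exposes the per-user inotify watch limit here.
+	raw, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
+	if err != nil {
+		fmt.Println("could not read limit:", err) // e.g. on non-Linux systems
+		return
+	}
+	limit, err := strconv.Atoi(strings.TrimSpace(string(raw)))
+	if err != nil {
+		fmt.Println("unexpected format:", err)
+		return
+	}
+	fmt.Println("inotify watch limit:", limit)
+}
+```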
+ +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000..b3ac3d8f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,38 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000..0f4ee52e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." 
+func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go new file mode 100644 index 00000000..59688559 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go @@ -0,0 +1,36 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000..a6d0e0ec --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,351 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// If InotifyRmWatch doesn't return an error, we successfully removed the
+	// watch; we still need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. E.g. the readEvents() goroutine receives
+	// IN_IGNORED, so EINVAL means that the wd is being rm_watch()ed or its file
+	// removed by another thread and we have not yet received the IN_IGNORED event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// The only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
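+				// Each record in the buffer has the layout of a C struct
+				// inotify_event { wd, mask, cookie, len, name[len] }, so the
+				// name bytes start SizeofInotifyEvent bytes into the record
+				// and the whole record spans SizeofInotifyEvent+len bytes
+				// (see inotify(7)); the offset arithmetic below relies on this.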
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000..b572a37c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
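+	// The pipe implements the classic self-pipe trick: wake() writes a byte
+	// to pipe[1], which makes pipe[0] readable and unblocks the epoll_wait
+	// call in wait() even when the inotify fd itself has no pending events.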
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. 
+func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000..6fb8d853 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,535 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns the directories and files that are being monitered. +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing; ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles mimics inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify, which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 00000000..36cc3845 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 00000000..98cd8476 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build darwin
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 00000000..02ce7deb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,586 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + // TODO: Consider using unsafe.Slice that is available from go1.17 + // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang + // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := syscall.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
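+			// If the kernel-reported offset of the next record lands at or
+			// beyond the number of bytes actually read, the buffer contents
+			// are inconsistent and any remaining records were lost.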
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore new file mode 100644 index 00000000..eb29ebae --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . 
+ - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md new file mode 100644 index 00000000..3305db0f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md new file mode 100644 index 00000000..b63e1f8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md new file mode 100644 index 00000000..b90c7e5c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -0,0 +1,122 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) +[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. 
This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 2](https://gopkg.in/go-jose/go-jose.v2)
+([branch](https://github.com/go-jose/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
+
+    import "gopkg.in/go-jose/go-jose.v2"
+
+[Version 3](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/master),
+[doc](https://godoc.org/github.com/go-jose/go-jose)) is the version under development (unstable, not released yet):
+
+    import "github.com/go-jose/go-jose/v3"
+
+All new feature development takes place on the `master` branch, which we are
+preparing to release as version 3 soon. Version 2 will continue to receive
+critical bug and security fixes. Note that starting with version 3 we are
+using Go modules for versioning instead of `gopkg.in` as before. Version 3
+will also require Go version 1.13 or higher.
+
+Version 1 (on the `v1` branch) is frozen and not supported anymore.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+ AES, HMAC                  | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
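+
+As a minimal sketch (an editor's illustration, not part of the upstream
+documentation; the key pair is generated inline purely for the example), an
+encrypt/decrypt round trip with the v3 import path looks roughly like this:
+
+    import (
+        "crypto/rand"
+        "crypto/rsa"
+
+        jose "github.com/go-jose/go-jose/v3"
+    )
+
+    func roundTrip() ([]byte, error) {
+        // Throwaway key pair, for illustration only.
+        priv, err := rsa.GenerateKey(rand.Reader, 2048)
+        if err != nil {
+            return nil, err
+        }
+
+        // Encrypt to the public key: RSA-OAEP key encryption,
+        // AES-128-GCM content encryption.
+        enc, err := jose.NewEncrypter(jose.A128GCM,
+            jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey}, nil)
+        if err != nil {
+            return nil, err
+        }
+        obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
+        if err != nil {
+            return nil, err
+        }
+
+        // Round-trip through the compact serialization and decrypt.
+        compact, err := obj.CompactSerialize()
+        if err != nil {
+            return nil, err
+        }
+        parsed, err := jose.ParseEncrypted(compact)
+        if err != nil {
+            return nil, err
+        }
+        return parsed.Decrypt(priv)
+    }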
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
new file mode 100644
index 00000000..78abc326
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. 
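+// The key must be on the curve matching the requested algorithm (P-256 for
+// ES256, P-384 for ES384, P-521 for ES512); signPayload below rejects any
+// mismatch between the curve's bit size and the algorithm.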
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+    // Verify that key management algorithm is supported by this encrypter
+    switch sigAlg {
+    case ES256, ES384, ES512:
+    default:
+        return recipientSigInfo{}, ErrUnsupportedAlgorithm
+    }
+
+    if privateKey == nil {
+        return recipientSigInfo{}, errors.New("invalid private key")
+    }
+
+    return recipientSigInfo{
+        sigAlg: sigAlg,
+        publicKey: staticPublicKey(&JSONWebKey{
+            Key: privateKey.Public(),
+        }),
+        signer: &ecDecrypterSigner{
+            privateKey: privateKey,
+        },
+    }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+    encryptedKey, err := ctx.encrypt(cek, alg)
+    if err != nil {
+        return recipientInfo{}, err
+    }
+
+    return recipientInfo{
+        encryptedKey: encryptedKey,
+        header:       &rawHeader{},
+    }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+    switch alg {
+    case RSA1_5:
+        return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+    case RSA_OAEP:
+        return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+    case RSA_OAEP_256:
+        return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+    }
+
+    return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+    return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+    // Note: The random reader on decrypt operations is only used for blinding,
+    // so stubbing is meaningless (hence the direct use of rand.Reader).
+    switch alg {
+    case RSA1_5:
+        defer func() {
+            // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+            // because of an index out of bounds error, which we want to ignore.
+            // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+            // only exists for preventing crashes with unpatched versions.
+            // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+            // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+            _ = recover()
+        }()
+
+        // Perform some input validation.
+        keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+        if keyBytes != len(jek) {
+            // Input size is incorrect, the encrypted payload should always match
+            // the size of the public modulus (e.g. using a 2048 bit key will
+            // produce 256 bytes of output). Reject this since it's invalid input.
+            return nil, ErrCryptoFailure
+        }
+
+        cek, _, err := generator.genKey()
+        if err != nil {
+            return nil, ErrCryptoFailure
+        }
+
+        // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+        // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+        // the Million Message Attack on Cryptographic Message Syntax". We are
+        // therefore deliberately ignoring errors here.
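+        // On invalid padding, DecryptPKCS1v15SessionKey leaves cek at the
+        // random value generated above (in constant time), so a bad ciphertext
+        // simply fails authentication later instead of exposing a padding oracle.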
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
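+        // As a result the recipient carries no encryptedKey; the CEK is derived
+        // from the ephemeral ECDH agreement when the message is assembled.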
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
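+        // In direct mode the ConcatKDF AlgorithmID is the "enc" value rather
+        // than "alg" (RFC 7518 §4.6.2), and the derived key length comes from
+        // the content cipher's key size.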
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
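+    // The fixed-width r||s concatenation is the raw signature format JWS
+    // requires (RFC 7518 §3.4), rather than the ASN.1 DER encoding used
+    // elsewhere for ECDSA signatures.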
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go new file mode 100644 index 00000000..af029cec --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
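+    // Capacity for the padded ciphertext is allocated up front; slicing back
+    // to the plaintext length lets padBuffer grow the buffer in place.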
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
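+// The returned head is the resized buffer; tail aliases the portion that lies
+// beyond the original input.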
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go new file mode 100644 index 00000000..f62c3bdb --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go new file mode 100644 index 00000000..093c6467 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+    "bytes"
+    "crypto"
+    "crypto/ecdsa"
+    "crypto/elliptic"
+    "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+    if size > 1<<16 {
+        panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+    }
+
+    // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+    algID := lengthPrefixed([]byte(alg))
+    ptyUInfo := lengthPrefixed(apuData)
+    ptyVInfo := lengthPrefixed(apvData)
+
+    // suppPubInfo is the encoded length of the output size in bits
+    supPubInfo := make([]byte, 4)
+    binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+    if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+        panic("public key not on same curve as private key")
+    }
+
+    z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+    zBytes := z.Bytes()
+
+    // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+    // the returned byte array. This can lead to a problem where zBytes will be
+    // shorter than expected which breaks the key derivation. Therefore we must pad
+    // to the full length of the expected coordinate here before calling the KDF.
+    octSize := dSize(priv.Curve)
+    if len(zBytes) != octSize {
+        zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+    }
+
+    reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+    key := make([]byte, size)
+
+    // Read on the KDF will never fail
+    _, _ = reader.Read(key)
+
+    return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+    order := curve.Params().P
+    bitLen := order.BitLen()
+    size := bitLen / 8
+    if bitLen%8 != 0 {
+        size++
+    }
+    return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+    out := make([]byte, len(data)+4)
+    binary.BigEndian.PutUint32(out, uint32(len(data)))
+    copy(out[4:], data)
+    return out
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
new file mode 100644
index 00000000..b9effbca
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go new file mode 100644 index 00000000..6901137e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -0,0 +1,544 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "github.com/go-jose/go-jose/v3/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
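+// EncryptWithAuthData additionally binds caller-supplied AAD to the message;
+// such AAD is only representable in the JSON serializations, not in the
+// compact form (see RFC 7516).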
+type Encrypter interface {
+    Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+    EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+    Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+    keySize() int
+    encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+    decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+    keySize() int
+    genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+    encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+    decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+    contentAlg     ContentEncryption
+    compressionAlg CompressionAlgorithm
+    cipher         contentCipher
+    recipients     []recipientKeyInfo
+    keyGenerator   keyGenerator
+    extraHeaders   map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+    keyID        string
+    keyAlg       KeyAlgorithm
+    keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+    Compression CompressionAlgorithm
+
+    // Optional map of additional keys to be inserted into the protected header
+    // of a JWE object. Some specifications which make use of JWE like to insert
+    // additional values here. All values must be JSON-serializable.
+    ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+    if eo.ExtraHeaders == nil {
+        eo.ExtraHeaders = map[HeaderKey]interface{}{}
+    }
+    eo.ExtraHeaders[k] = v
+    return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+    return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+    return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + 
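+    // Both excluded modes tie the CEK to a single recipient key (dir uses the
+    // shared key as the CEK; ECDH-ES derives it from one key agreement), so a
+    // CEK shared across several recipients is impossible for them.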
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
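+        // Keeping headers in the protected header also keeps single-recipient
+        // messages expressible in the compact serialization, which has no
+        // per-recipient header slot.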
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
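+// On failure the returned index is -1 and the error describes the cause.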
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, _ = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go new file mode 100644 index 00000000..71ec1c41 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go new file mode 100644 index 00000000..968a4249 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v3/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
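Aside: the base64URLDecode helper at the end of the encoding.go hunk above accepts both padded and unpadded base64url input by stripping any trailing '=' before decoding with RawURLEncoding, as described in RFC 7515, Appendix C. A minimal standalone sketch of that behavior follows; the decode helper here is illustrative only and is not part of the vendored code:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decode mirrors the vendored base64URLDecode: drop optional '=' padding,
// then decode as raw (unpadded) URL-safe base64.
func decode(value string) ([]byte, error) {
	value = strings.TrimRight(value, "=")
	return base64.RawURLEncoding.DecodeString(value)
}

func main() {
	// Both the unpadded and the padded encoding of "hello" decode successfully.
	for _, s := range []string{"aGVsbG8", "aGVsbG8="} {
		b, err := decode(s)
		fmt.Printf("%q -> %q (err: %v)\n", s, b, err)
	}
}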
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md
new file mode 100644
index 00000000..86de5e55
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+  [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+  This is to avoid differences in the interpretation of JOSE messages between
+  go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+  input whenever we detect a duplicate. Rather than trying to work with malformed
+  data, we prefer to reject it right away.
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
new file mode 100644
index 00000000..4dbc4146
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
@@ -0,0 +1,1217 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// matching member names case-sensitively (unlike the standard library).
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+	}
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
+	}
+
+	d.scan.reset()
+	// We decode rv not rv.Elem because the Unmarshaler interface
+	// test must be applied at the top level of the value.
+	d.value(rv)
+	return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+	// This function implements the JSON numbers grammar.
+	// See https://tools.ietf.org/html/rfc7159#section-6
+	// and http://json.org/number.gif
+
+	if s == "" {
+		return false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		if s == "" {
+			return false
+		}
+	}
+
+	// Digits
+	switch {
+	default:
+		return false
+
+	case s[0] == '0':
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			if s == "" {
+				return false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// Make sure we are at the end.
+	return s == ""
+}
+
+type NumberUnmarshalType int
+
+const (
+	// unmarshal a JSON number into an interface{} as a float64
+	UnmarshalFloat NumberUnmarshalType = iota
+	// unmarshal a JSON number into an interface{} as a `json.Number`
+	UnmarshalJSONNumber
+	// unmarshal a JSON number into an interface{} as an int64
+	// if the value is an integer, otherwise as a float64
+	UnmarshalIntOrFloat
+)
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+	data       []byte
+	off        int // read offset in data
+	scan       scanner
+	nextscan   scanner // for calls to nextValue
+	savedError error
+	numberType NumberUnmarshalType
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+	d.data = data
+	d.off = 0
+	d.savedError = nil
+	return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
+func (d *decodeState) literal(v reflect.Value) {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+
+	d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+	switch d.numberType {
+
+	case UnmarshalJSONNumber:
+		return Number(s), nil
+	case UnmarshalIntOrFloat:
+		v, err := strconv.ParseInt(s, 10, 64)
+		if err == nil {
+			return v, nil
+		}
+
+		// tries to parse integer number in scientific notation
+		f, err := strconv.ParseFloat(s, 64)
+		if err != nil {
+			return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+		}
+
+		// if it has no decimal value use int64
+		if fi, fd := math.Modf(f); fd == 0.0 {
+			return int64(fi), nil
+		}
+		return f, nil
+	default:
+		f, err := strconv.ParseFloat(s, 64)
+		if err != nil {
+			return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+		}
+		return f, nil
+	}
+
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		// Empty string given
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+			return
+		}
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		}
+
+	case '"': // string
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type(),
int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. 
+func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go new file mode 100644 index 00000000..ea0a1361 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. 
Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. 
+// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML