diff --git a/.github/workflows/cross-repo-issue.yml b/.github/workflows/cross-repo-issue.yml new file mode 100644 index 00000000..8cbcb35e --- /dev/null +++ b/.github/workflows/cross-repo-issue.yml @@ -0,0 +1,28 @@ +name: Cross-repo Issue Creation + +on: + pull_request: + types: [closed] + branches: + - "master" + +jobs: + cross-repo: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@v1.8.0 + with: + app_id: ${{ secrets.XREPO_APP_ID }} + private_key: ${{ secrets.XREPO_PEM }} + - name: create issue in other repo + if: "!contains(github.event.pull_request.labels.*.name, 'do not port') && github.event.pull_request.merged" + env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + run: | + echo -e "A PR was merged over on PBC-Go\n\n- [https://github.com/prebid/prebid-cache/pull/${{github.event.number}}](https://github.com/prebid/prebid-cache/pull/${{github.event.number}})\n- timestamp: ${{ github.event.pull_request.merged_at}}" > msg + export msg=$(cat msg) + gh issue create --repo prebid/prebid-cache-java --title "Port PR from PBC-Go: ${{ github.event.pull_request.title }}" \ + --body "$msg" \ + --label auto diff --git a/.github/workflows/issue_prioritization.yml b/.github/workflows/issue_prioritization.yml new file mode 100644 index 00000000..a276172b --- /dev/null +++ b/.github/workflows/issue_prioritization.yml @@ -0,0 +1,100 @@ +name: Issue tracking +on: + issues: + types: + - opened +jobs: + track_issue: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@v1.8.0 + with: + app_id: ${{ secrets.PBS_PROJECT_APP_ID }} + private_key: ${{ secrets.PBS_PROJECT_APP_PEM }} + + - name: Get project data + env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + ORGANIZATION: prebid + DATE_FIELD: Created on + PROJECT_NUMBER: 4 + run: | + gh api graphql -f query=' + query($org: String!, $number: Int!) 
{ + organization(login: $org){ + projectV2(number: $number) { + id + fields(first:100) { + nodes { + ... on ProjectV2Field { + id + name + } + ... on ProjectV2SingleSelectField { + id + name + options { + id + name + } + } + } + } + } + } + }' -f org=$ORGANIZATION -F number=$PROJECT_NUMBER > project_data.json + + echo 'PROJECT_ID='$(jq '.data.organization.projectV2.id' project_data.json) >> $GITHUB_ENV + echo 'DATE_FIELD_ID='$(jq '.data.organization.projectV2.fields.nodes[] | select(.name== "'"$DATE_FIELD"'") | .id' project_data.json) >> $GITHUB_ENV + + - name: Add issue to project + env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + ISSUE_ID: ${{ github.event.issue.node_id }} + run: | + gh api graphql -f query=' + mutation($project:ID!, $issue:ID!) { + addProjectV2ItemById(input: {projectId: $project, contentId: $issue}) { + item { + id, + content { + ... on Issue { + createdAt + } + ... on PullRequest { + createdAt + } + } + } + } + }' -f project=$PROJECT_ID -f issue=$ISSUE_ID > issue_data.json + + echo 'ITEM_ID='$(jq '.data.addProjectV2ItemById.item.id' issue_data.json) >> $GITHUB_ENV + echo 'ITEM_CREATION_DATE='$(jq '.data.addProjectV2ItemById.item.content.createdAt' issue_data.json | cut -c 2-11) >> $GITHUB_ENV + + - name: Set fields + env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + run: | + gh api graphql -f query=' + mutation ( + $project: ID! + $item: ID! + $date_field: ID! + $date_value: Date! 
+ ) { + set_creation_date: updateProjectV2ItemFieldValue(input: { + projectId: $project + itemId: $item + fieldId: $date_field + value: { + date: $date_value + } + }) { + projectV2Item { + id + } + } + }' -f project=$PROJECT_ID -f item=$ITEM_ID -f date_field=$DATE_FIELD_ID -f date_value=$ITEM_CREATION_DATE --silent diff --git a/.github/workflows/package.yml b/.github/workflows/package.yml deleted file mode 100644 index 5f5f1663..00000000 --- a/.github/workflows/package.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Package - -on: - push: - tags: - - '[0-9]+.[0-9]+.[0-9]+' - -jobs: - Package: - name: Package and Publish Docker Image - if: github.event.base_ref == 'refs/heads/master' - runs-on: ubuntu-latest - - steps: - - - name: Get Version - id: get_version - run: | - echo ::set-output name=version::${GITHUB_REF/refs\/tags\/} - - - name: Checkout Branch - uses: actions/checkout@v2 - - - name: Build - run: | - docker build --build-arg DEPLOY_VERSION=${{ steps.get_version.outputs.version }} -t docker.io/prebid/prebid-cache:${{ steps.get_version.outputs.version }} . 
- - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USER }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Publish to Docker Hub - run: | - docker push docker.io/prebid/prebid-cache:${{ steps.get_version.outputs.version }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8167c837..6458272d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,26 +1,126 @@ name: Release on: - push: - tags: - - '[0-9]+.[0-9]+.[0-9]+' + workflow_dispatch: + inputs: + releaseType: + type: choice + options: + - minor + - patch + default: minor + required: true + description: 'minor: v0.X.0, patch: v0.0.X' + debug: + type: boolean + default: true + description: 'executes the workflow in debug mode (skip the publishing tag, docker image and release steps)' jobs: - release: - name: Create Release - if: github.event.base_ref == 'refs/heads/master' + check-permission: + name: Check permission + if: contains(github.ref, 'refs/heads/master') runs-on: ubuntu-latest + permissions: + contents: read steps: - - name: Get Version - id: get_version + - name: Check user permission + uses: actions-cool/check-user-permission@v2.2.0 + id: check + with: + require: 'write' + outputs: + hasWritePermission: ${{ steps.check.outputs.require-result }} + + publish-tag: + name: Publish tag + needs: check-permission + if: contains(needs.check-permission.outputs.hasWritePermission, 'true') + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout prebid cache + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Create & publish tag + id: release + run: | + currentTag=$(git describe --abbrev=0 --tags) + echo "Current release tag ${currentTag}" + + echo ${currentTag} | grep -q "^v\?[0-9]\+\.[0-9]\+\.[0-9]\+$" + if [ $? -ne 0 ]; then + echo "Current tag format won't let us compute the new tag name. 
Required format v[0-9]\+\.[0-9]\+\.[0-9]\+" + exit 1 + fi + + if [[ "${currentTag:0:1}" != "v" ]]; then + currentTag="v${currentTag}" + fi + + nextTag='' + releaseType=${{ inputs.releaseType }} + if [ $releaseType == "minor" ]; then + # increment minor version and reset patch version + nextTag=$(echo "${currentTag}" | awk -F. '{OFS="."; $2+=1; $3=0; print $0}') + else + # increment patch version + nextTag=$(echo "${currentTag}" | awk -F. '{OFS="."; $3+=1; print $0}') + fi + + if [ ${{ inputs.debug }} == 'true' ]; then + echo "running workflow in debug mode, next ${releaseType} tag: ${nextTag}" + else + git tag $nextTag + git push origin $nextTag + echo "tag=${nextTag}" >> $GITHUB_OUTPUT + fi + outputs: + releaseTag: ${{ steps.release.outputs.tag }} + + publish-docker-image: + name: Publish docker image + needs: publish-tag + if: contains(inputs.debug, 'false') + runs-on: ubuntu-latest + steps: + - name: Checkout prebid cache + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Build image + run: | + docker build --build-arg DEPLOY_VERSION=${{ needs.publish-tag.outputs.releaseTag }} -t docker.io/prebid/prebid-cache:${{ needs.publish-tag.outputs.releaseTag }} . 
+ - name: Login to docker Hub + if: contains(inputs.debug, 'false') + uses: docker/login-action@v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Publish to docker Hub run: | - echo ::set-output name=tag::${GITHUB_REF/refs\/tags\/} - - name: Create & Publish Release - uses: release-drafter/release-drafter@v5.12.1 + docker push docker.io/prebid/prebid-cache:${{ needs.publish-tag.outputs.releaseTag }} + + publish-release: + name: Publish release + needs: [publish-tag, publish-docker-image] + if: contains(inputs.debug, 'false') + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout prebid cache + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Create & publish release + uses: release-drafter/release-drafter@v5.22.0 with: - name: ${{ steps.get_version.outputs.tag }} - tag: ${{ steps.get_version.outputs.tag }} - version: ${{ steps.get_version.outputs.tag }} + name: ${{ needs.publish-tag.outputs.releaseTag }} + tag: ${{ needs.publish-tag.outputs.releaseTag }} + version: ${{ needs.publish-tag.outputs.releaseTag }} publish: true env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/validate-merge.yml b/.github/workflows/validate-merge.yml index 26f3b694..54e09512 100644 --- a/.github/workflows/validate-merge.yml +++ b/.github/workflows/validate-merge.yml @@ -6,16 +6,16 @@ on: jobs: validate-merge: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: - go-version: 1.16.4 + go-version: 1.19.5 - name: Checkout Merged Branch - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Validate run: | diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index 0d32f6cf..942daf60 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -10,17 
+10,17 @@ jobs: validate: strategy: matrix: - go-version: [1.15.x, 1.16.x] - runs-on: ubuntu-18.04 + go-version: [1.18.x, 1.19.x] + runs-on: ubuntu-20.04 steps: - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: go-version: ${{ matrix.go-version }} - name: Checkout Branch - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # Resolves to empty string for push events and falls back to HEAD. # See: https://github.com/actions/checkout#checkout-pull-request-head-commit-instead-of-merge-commit diff --git a/.gitignore b/.gitignore index c1807165..1e16f692 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# dep directory vendor + +# build artifacts prebid-cache + +# ide +.vscode/ + +# autogenerated mac file + +.DS_Store + +# Autogenerated Vim swap files +*~ +*.swp +*.swo diff --git a/Dockerfile b/Dockerfile index e3a8d7df..192acfc0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -FROM ubuntu:18.04 AS build +FROM ubuntu:22.04 AS build RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y wget -ENV GO_INSTALLER=go1.16.4.linux-amd64.tar.gz + apt-get install -y --no-install-recommends wget ca-certificates +ENV GO_INSTALLER=go1.19.5.linux-amd64.tar.gz WORKDIR /tmp RUN wget https://dl.google.com/go/$GO_INSTALLER && \ tar -C /usr/local -xzf $GO_INSTALLER @@ -12,7 +12,7 @@ ENV GOROOT=/usr/local/go ENV PATH=$GOROOT/bin:$PATH ENV GOPROXY="https://proxy.golang.org" RUN apt-get update && \ - apt-get install -y git && \ + apt-get install -y --no-install-recommends git && \ apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ENV CGO_ENABLED 0 COPY ./ ./ @@ -22,19 +22,18 @@ ARG TEST="true" RUN if [ "$TEST" != "false" ]; then ./validate.sh ; fi RUN go build -mod=vendor -ldflags "-X github.com/prebid/prebid-cache/version.Ver=`git describe --tags` -X github.com/prebid/prebid-cache/version.Rev=`git rev-parse HEAD`" . 
-FROM ubuntu:18.04 AS release +FROM ubuntu:22.04 AS release LABEL maintainer="hans.hjort@xandr.com" RUN apt-get update && \ - apt-get install --assume-yes apt-utils && \ - apt-get install -y ca-certificates && \ + apt-get install -y --no-install-recommends ca-certificates && \ apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* WORKDIR /usr/local/bin/ COPY --from=build /app/prebid-cache/prebid-cache . RUN chmod a+xr prebid-cache COPY --from=build /app/prebid-cache/config.yaml . RUN chmod a+r config.yaml -RUN adduser prebid_user -USER prebid_user +RUN addgroup --system --gid 2001 prebidgroup && adduser --system --uid 1001 --ingroup prebidgroup prebid +USER prebid EXPOSE 2424 EXPOSE 2525 ENTRYPOINT ["/usr/local/bin/prebid-cache"] diff --git a/README.md b/README.md index 6f84e7ed..6ac453ea 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ This application stores short-term data for use in Prebid Server and Prebid.js, ## Installation -First install Go version 1.15 or newer. +First install Go version 1.18 or newer. -Note that prebid-cache is using Go modules. We officially support the most recent two major versions of the Go runtime. However, if you'd like to use a version <1.13 and are inside `GOPATH` `GO111MODULE` needs to be set to `GO111MODULE=on`. +Note that prebid-cache uses Go modules. We officially support the most recent two major versions of the Go runtime. However, if you'd like to use a version <1.13 and are inside `GOPATH` `GO111MODULE` needs to be set to `GO111MODULE=on`. Download and prepare Prebid Cache: @@ -121,7 +121,7 @@ This would result in a response like this: Prebid Cache will use the custom keys for those elements that include them and will create system-generated `uuid`s for the ones that don't. 
Note that if configuration flag `allow_setting_keys` is set to `false` or simply not set inside the `config.yaml` file, Prebid Cache would generate random `uuid`s for all elements and not use the custom keys `"CustomKeyValueHere"` nor `"AnotherCustomKeyValue"` at all. -#### Overwritting values +#### Overwriting values Prebid Cache does not allow overwritting any value, for either autogenerated or custom keys. If an entry already exists for a given key, it will not be overwitten, and an empty string will be returned as the `uuid` value of that entry. Suppose we wanted to overwrite the entries under `"CustomKeyValueHere"` and the system-generated key `"147c9934-894b-4c1f-9a32-e7bb9cd15376"`. @@ -208,7 +208,11 @@ This section does not describe permanent API contracts; it just describes limita ## Backend Configuration -In order to store its data a Prebid Cache instance can use either of the following storage services: Aerospike, Cassandra, Memcache, Redis, or local memory. Select the storage service your Prebid Cache server will use by setting the `backend.type` property in the `config.yaml` file: +Prebid Cache requires a backend data store which enforces TTL expiration. The following storage options are supported: Aerospike, Cassandra, Memcache, and Redis. You're welcomed to contribute a new backend adapter if needed. + +There is also an option (enabled by default) for a basic in-memory data store intended only for development. This backend does not support TTL expiration and is not built for production use. + +To configure, select the storage service for your Prebid Cache server by setting the `backend.type` property in the `config.yaml` file: ```yaml backend: @@ -216,6 +220,7 @@ backend: ``` ### Aerospike +Prebid Cache makes use of an Aerospike Go client that requires Aerospike server version 4.9+ and will not work properly with older versions. 
Full documentation of the Aerospike Go client can be found [here](https://github.com/aerospike/aerospike-client-go/tree/v6). | Configuration field | Type | Description | | --- | --- | --- | | host | string | aerospike server URI | @@ -223,6 +228,7 @@ backend: | namespace | string | aerospike service namespace where keys get initialized | ### Cassandra +Prebid Cache makes use of a Cassandra client that supports latest 3 major releases of Cassandra (2.1.x, 2.2.x, and 3.x.x). Full documentation of the Cassandra Go client can be found [here](https://github.com/gocql/gocql). | Configuration field | Type | Description | | --- | --- | --- | | hosts | string | Cassandra server URI | @@ -236,6 +242,7 @@ backend: | hosts | string array | List of nodes when not using auto discovery | ### Redis: +Prebid Cache makes use of a Redis Go client compatible with Redis 6. Full documentation of the Redis Go client Prebid Cache uses can be found [here](https://github.com/go-redis/redis). | Configuration field | Type | Description | | --- | --- | --- | | host | string | Redis server URI | @@ -308,14 +315,14 @@ routes: ### Prerequisites -[Golang](https://golang.org/doc/install) 1.9.1 or greater and [Dep](https://github.com/golang/dep#installation) must be installed on your system. +[Golang](https://golang.org/doc/install) 1.18.x or newer. -### Automated tests +### Automated Tests `./validate.sh` runs the unit tests and reformats your code with [gofmt](https://golang.org/cmd/gofmt/). `./validate.sh --nofmt` runs the unit tests, but will _not_ reformat your code. -### Manual testing +### Manual Testing Run `prebid-cache` locally with: @@ -324,7 +331,7 @@ go build . ./prebid-cache ``` -The service will respond to requests on `localhost:2424`, and the admin data will be available on `localhost:2525` +The service will respond to requests on `localhost:2424`, and the admin data will be available on `localhost:2525`. 
The port numbers can be configured to different values in your production deployment. ### Configuration @@ -374,4 +381,4 @@ docker run -p 8000:8000 -t prebid-cache ### Profiling -[pprof stats](http://artem.krylysov.com/blog/2017/03/13/profiling-and-optimizing-go-web-applications/) can be accessed from a running app on `localhost:2525` +[pprof stats](http://artem.krylysov.com/blog/2017/03/13/profiling-and-optimizing-go-web-applications/) can be accessed from a running app on the admin port `localhost:2525`. \ No newline at end of file diff --git a/backends/aerospike.go b/backends/aerospike.go index 8be235aa..bb1c4200 100644 --- a/backends/aerospike.go +++ b/backends/aerospike.go @@ -3,9 +3,11 @@ package backends import ( "context" "errors" + "fmt" + "time" - as "github.com/aerospike/aerospike-client-go" - as_types "github.com/aerospike/aerospike-client-go/types" + as "github.com/aerospike/aerospike-client-go/v6" + as_types "github.com/aerospike/aerospike-client-go/v6/types" "github.com/prebid/prebid-cache/config" "github.com/prebid/prebid-cache/metrics" "github.com/prebid/prebid-cache/utils" @@ -51,9 +53,52 @@ type AerospikeBackend struct { } // NewAerospikeBackend validates config.Aerospike and returns an AerospikeBackend + +type NewAerospikeClientFunc func(*as.ClientPolicy, ...*as.Host) (*as.Client, as.Error) + func NewAerospikeBackend(cfg config.Aerospike, metrics *metrics.Metrics) *AerospikeBackend { - var hosts []*as.Host + return newAerospikeBackend(as.NewClientWithPolicyAndHost, cfg, metrics) +} + +func newAerospikeBackend(newAerospikeClient NewAerospikeClientFunc, cfg config.Aerospike, metrics *metrics.Metrics) *AerospikeBackend { + clientPolicy := generateAerospikeClientPolicy(cfg) + hosts, err := generateHostsList(cfg) + if err != nil { + log.Fatalf("Error creating Aerospike backend: %s", err.Error()) + return nil + } + + client, err := newAerospikeClient(clientPolicy, hosts...) 
+ if err != nil { + log.Fatalf("Error creating Aerospike backend: %s", classifyAerospikeError(err).Error()) + panic("AerospikeBackend failure. This shouldn't happen.") + } + log.Infof("Connected to Aerospike host(s) %v on port %d", append(cfg.Hosts, cfg.Host), cfg.Port) + + // client.DefaultPolicy.MaxRetries determines the maximum number of retries before aborting a transaction. + // Default for read: 2 (initial attempt + 2 retries = 3 attempts) + if cfg.MaxReadRetries > 2 { + client.DefaultPolicy.MaxRetries = cfg.MaxReadRetries + } + + // client.DefaultWritePolicy.MaxRetries determines the maximum number of retries for write before aborting + // a transaction. Prebid Cache uses the Aerospike backend to do CREATE_ONLY writes, which are idempotent so + // it's safe to increase the maximum value of write retries. + // Default for write: 0 (no retries) + if cfg.MaxWriteRetries > 0 { + client.DefaultWritePolicy.MaxRetries = cfg.MaxWriteRetries + } + + return &AerospikeBackend{ + namespace: cfg.Namespace, + client: &AerospikeDBClient{client}, + metrics: metrics, + } +} +// generateAerospikeClientPolicy returns an Aerospike ClientPolicy object configured according to values +// in config.Aerospike fields +func generateAerospikeClientPolicy(cfg config.Aerospike) *as.ClientPolicy { clientPolicy := as.NewClientPolicy() // cfg.User and cfg.Password are optional parameters // if left blank in the config, they will default to the empty @@ -61,6 +106,25 @@ func NewAerospikeBackend(cfg config.Aerospike, metrics *metrics.Metrics) *Aerosp clientPolicy.User = cfg.User clientPolicy.Password = cfg.Password + // Connection idle timeout default is 55 seconds + if cfg.ConnIdleTimeoutSecs > 0 { + clientPolicy.IdleTimeout = time.Duration(cfg.ConnIdleTimeoutSecs) * time.Second + } + + // Default connection queue size per node is 256 + if cfg.ConnQueueSize > 0 { + clientPolicy.ConnectionQueueSize = cfg.ConnQueueSize + } + + return clientPolicy +} + +func generateHostsList(cfg 
config.Aerospike) ([]*as.Host, error) { + var hosts []*as.Host + + if cfg.Port <= 0 { + return nil, fmt.Errorf("Cannot connect to Aerospike host at port %d", cfg.Port) + } if len(cfg.Host) > 1 { hosts = append(hosts, as.NewHost(cfg.Host, cfg.Port)) log.Info("config.backend.aerospike.host is being deprecated in favor of config.backend.aerospike.hosts") @@ -68,19 +132,10 @@ func NewAerospikeBackend(cfg config.Aerospike, metrics *metrics.Metrics) *Aerosp for _, host := range cfg.Hosts { hosts = append(hosts, as.NewHost(host, cfg.Port)) } - - client, err := as.NewClientWithPolicyAndHost(clientPolicy, hosts...) - if err != nil { - log.Fatalf("Error creating Aerospike backend: %s", classifyAerospikeError(err).Error()) - panic("AerospikeBackend failure. This shouldn't happen.") - } - log.Infof("Connected to Aerospike host(s) %v on port %d", append(cfg.Hosts, cfg.Host), cfg.Port) - - return &AerospikeBackend{ - namespace: cfg.Namespace, - client: &AerospikeDBClient{client}, - metrics: metrics, + if len(hosts) == 0 { + return nil, errors.New("Cannot connect to empty Aerospike host(s)") } + return hosts, nil } // Get creates an aerospike key based on the UUID key parameter, perfomrs the client's Get call @@ -134,11 +189,12 @@ func (a *AerospikeBackend) Put(ctx context.Context, key string, value string, tt func classifyAerospikeError(err error) error { if err != nil { - if aerr, ok := err.(as_types.AerospikeError); ok { - if aerr.ResultCode() == as_types.KEY_NOT_FOUND_ERROR { + ae := &as.AerospikeError{} + if errors.As(err, &ae) { + if errors.Is(err, &as.AerospikeError{ResultCode: as_types.KEY_NOT_FOUND_ERROR}) { return utils.NewPBCError(utils.KEY_NOT_FOUND) } - if aerr.ResultCode() == as_types.KEY_EXISTS_ERROR { + if errors.Is(err, &as.AerospikeError{ResultCode: as_types.KEY_EXISTS_ERROR}) { return utils.NewPBCError(utils.RECORD_EXISTS) } } diff --git a/backends/aerospike_test.go b/backends/aerospike_test.go index f9b1f287..85500eff 100644 --- a/backends/aerospike_test.go 
+++ b/backends/aerospike_test.go @@ -2,11 +2,13 @@ package backends import ( "context" + "errors" "fmt" "testing" + "time" - as "github.com/aerospike/aerospike-client-go" - as_types "github.com/aerospike/aerospike-client-go/types" + as "github.com/aerospike/aerospike-client-go/v6" + as_types "github.com/aerospike/aerospike-client-go/v6/types" "github.com/prebid/prebid-cache/config" "github.com/prebid/prebid-cache/metrics" "github.com/prebid/prebid-cache/metrics/metricstest" @@ -17,66 +19,281 @@ import ( "github.com/stretchr/testify/assert" ) -func TestNewAerospikeBackend(t *testing.T) { +func TestGenerateAerospikeClientPolicy(t *testing.T) { + testCases := []struct { + desc string + inCfg config.Aerospike + expected *as.ClientPolicy + }{ + { + desc: "Blank configuration", + inCfg: config.Aerospike{}, + expected: as.NewClientPolicy(), + }, + { + desc: "Config with credentials", + inCfg: config.Aerospike{ + User: "foobar", + Password: "password", + }, + expected: &as.ClientPolicy{ + User: "foobar", + Password: "password", + AuthMode: as.AuthModeInternal, + Timeout: 30 * time.Second, + IdleTimeout: 0 * time.Second, + LoginTimeout: 10 * time.Second, + ConnectionQueueSize: 100, + OpeningConnectionThreshold: 0, + FailIfNotConnected: true, + TendInterval: time.Second, + LimitConnectionsToQueueSize: true, + IgnoreOtherSubnetAliases: false, + MaxErrorRate: 100, + ErrorRateWindow: 1, + }, + }, + { + desc: "Config with ConnIdleTimeoutSecs", + inCfg: config.Aerospike{ + ConnIdleTimeoutSecs: 3600, + }, + expected: &as.ClientPolicy{ + AuthMode: as.AuthModeInternal, + Timeout: 30 * time.Second, + IdleTimeout: 3600 * time.Second, + LoginTimeout: 10 * time.Second, + ConnectionQueueSize: 100, + OpeningConnectionThreshold: 0, + FailIfNotConnected: true, + TendInterval: time.Second, + LimitConnectionsToQueueSize: true, + IgnoreOtherSubnetAliases: false, + MaxErrorRate: 100, + ErrorRateWindow: 1, + }, + }, + { + desc: "Config with ConnIdleTimeoutSecs", + inCfg: config.Aerospike{ + 
ConnQueueSize: 31416, + }, + expected: &as.ClientPolicy{ + AuthMode: as.AuthModeInternal, + Timeout: 30 * time.Second, + IdleTimeout: 0 * time.Second, + LoginTimeout: 10 * time.Second, + ConnectionQueueSize: 31416, + OpeningConnectionThreshold: 0, + FailIfNotConnected: true, + TendInterval: time.Second, + LimitConnectionsToQueueSize: true, + IgnoreOtherSubnetAliases: false, + MaxErrorRate: 100, + ErrorRateWindow: 1, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + asPolicy := generateAerospikeClientPolicy(tc.inCfg) + assert.Equal(t, tc.expected, asPolicy) + }) + } +} + +func TestGenerateHostsList(t *testing.T) { + type testOutput struct { + hosts []*as.Host + err error + } type logEntry struct { msg string lvl logrus.Level } - testCases := []struct { desc string inCfg config.Aerospike - expectPanic bool + expectedOut testOutput expectedLogEntries []logEntry }{ { - desc: "Unable to connect hosts fakeTestUrl panic and log fatal error when passed additional hosts", + desc: "no_port", + inCfg: config.Aerospike{}, + expectedOut: testOutput{ + err: errors.New("Cannot connect to Aerospike host at port 0"), + }, + }, + { + desc: "port_no_host_nor_hosts", + inCfg: config.Aerospike{Port: 8888}, + expectedOut: testOutput{ + err: errors.New("Cannot connect to empty Aerospike host(s)"), + }, + }, + { + desc: "port_and_hosts_no_host", inCfg: config.Aerospike{ - Hosts: []string{"foo.com", "bat.com"}, Port: 8888, + Hosts: []string{"foo.com", "bar.com"}, + }, + expectedOut: testOutput{ + hosts: []*as.Host{ + as.NewHost("foo.com", 8888), + as.NewHost("bar.com", 8888), + }, + }, + }, + { + desc: "port_and_host", + inCfg: config.Aerospike{ + Host: "foo.com", + Port: 8888, + }, + expectedOut: testOutput{ + hosts: []*as.Host{as.NewHost("foo.com", 8888)}, }, - expectPanic: true, expectedLogEntries: []logEntry{ { - msg: "Error creating Aerospike backend: Failed to connect to host(s): [foo.com:8888 bat.com:8888]; error: Connecting to the cluster timed 
out.", - lvl: logrus.FatalLevel, + msg: "config.backend.aerospike.host is being deprecated in favor of config.backend.aerospike.hosts", + lvl: logrus.InfoLevel, }, }, }, { - desc: "Unable to connect host and hosts panic and log fatal error when passed additional hosts", + desc: "Port_host_and_hosts", inCfg: config.Aerospike{ - Host: "fakeTestUrl.foo", - Hosts: []string{"foo.com", "bat.com"}, Port: 8888, + Host: "foo.com", + Hosts: []string{"foo.com", "bar.com"}, + }, + expectedOut: testOutput{ + hosts: []*as.Host{ + as.NewHost("foo.com", 8888), + as.NewHost("foo.com", 8888), + as.NewHost("bar.com", 8888), + }, }, - expectPanic: true, expectedLogEntries: []logEntry{ { msg: "config.backend.aerospike.host is being deprecated in favor of config.backend.aerospike.hosts", lvl: logrus.InfoLevel, }, + }, + }, + } + + // logrus entries will be recorded to this `hook` object so we can compare and assert them + hook := test.NewGlobal() + + //substitute logger exit function so execution doesn't get interrupted when log.Fatalf() call comes + defer func() { logrus.StandardLogger().ExitFunc = nil }() + logrus.StandardLogger().ExitFunc = func(int) {} + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + asHosts, err := generateHostsList(tc.inCfg) + assert.Equal(t, tc.expectedOut.err, err) + if assert.Len(t, asHosts, len(tc.expectedOut.hosts)) { + assert.ElementsMatch(t, tc.expectedOut.hosts, asHosts) + } + if assert.Len(t, hook.Entries, len(tc.expectedLogEntries)) { + for i := 0; i < len(tc.expectedLogEntries); i++ { + assert.Equal(t, tc.expectedLogEntries[i].lvl, hook.Entries[i].Level) + assert.Equal(t, tc.expectedLogEntries[i].msg, hook.Entries[i].Message) + } + } + //Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry()) + }) + } +} + +func TestNewAerospikeBackend(t *testing.T) { + type logEntry struct { + msg string + lvl logrus.Level + } + + errorProneNewClientFunc := func(*as.ClientPolicy, ...*as.Host) 
(*as.Client, as.Error) { + return nil, &as.AerospikeError{} + } + successfulNewClientFunc := func(*as.ClientPolicy, ...*as.Host) (*as.Client, as.Error) { + return nil, nil + } + + testCases := []struct { + desc string + inCfg config.Aerospike + newClientFunc NewAerospikeClientFunc + expectedLogEntries []logEntry + expectedPanic bool + }{ + { + desc: "no_port_error", + inCfg: config.Aerospike{}, + expectedLogEntries: []logEntry{ + { + msg: "Error creating Aerospike backend: Cannot connect to Aerospike host at port 0", + lvl: logrus.FatalLevel, + }, + }, + }, + { + desc: "no_host_nor_hosts_error", + inCfg: config.Aerospike{Port: 8888}, + expectedLogEntries: []logEntry{ + { + msg: "Error creating Aerospike backend: Cannot connect to empty Aerospike host(s)", + lvl: logrus.FatalLevel, + }, + }, + }, + { + desc: "newAerospikeClient_error", + inCfg: config.Aerospike{ + Hosts: []string{"fakeUrl"}, + Port: 8888, + }, + newClientFunc: errorProneNewClientFunc, + expectedLogEntries: []logEntry{ { - msg: "Error creating Aerospike backend: Failed to connect to host(s): [fakeTestUrl.foo:8888 foo.com:8888 bat.com:8888]; error: Connecting to the cluster timed out.", + msg: "Error creating Aerospike backend: ResultCode: OK, Iteration: 0, InDoubt: false, Node: : ", lvl: logrus.FatalLevel, }, }, + expectedPanic: true, }, { - desc: "Unable to connect hoost panic and log fatal error", + desc: "success_with_deprecated_host", inCfg: config.Aerospike{ - Host: "fakeTestUrl.foo", + Host: "fakeUrl", Port: 8888, }, - expectPanic: true, + newClientFunc: successfulNewClientFunc, expectedLogEntries: []logEntry{ { msg: "config.backend.aerospike.host is being deprecated in favor of config.backend.aerospike.hosts", lvl: logrus.InfoLevel, }, { - msg: "Error creating Aerospike backend: Failed to connect to host(s): [fakeTestUrl.foo:8888]; error: Connecting to the cluster timed out.", - lvl: logrus.FatalLevel, + msg: "Connected to Aerospike host(s) [fakeUrl] on port 8888", + lvl: logrus.InfoLevel, + 
}, + }, + }, + { + desc: "success_with_hosts_list", + inCfg: config.Aerospike{ + Hosts: []string{"fakeUrl"}, + Port: 8888, + }, + newClientFunc: successfulNewClientFunc, + expectedLogEntries: []logEntry{ + { + msg: "Connected to Aerospike host(s) [fakeUrl ] on port 8888", + lvl: logrus.InfoLevel, }, }, }, @@ -90,18 +307,29 @@ func TestNewAerospikeBackend(t *testing.T) { logrus.StandardLogger().ExitFunc = func(int) {} for _, test := range testCases { - // Run test - assert.Panics(t, func() { NewAerospikeBackend(test.inCfg, nil) }, "Aerospike library's NewClientWithPolicyAndHost() should have thrown an error and didn't, hence the panic didn't happen") - if assert.Len(t, hook.Entries, len(test.expectedLogEntries), test.desc) { - for i := 0; i < len(test.expectedLogEntries); i++ { - assert.Equal(t, test.expectedLogEntries[i].msg, hook.Entries[i].Message, test.desc) - assert.Equal(t, test.expectedLogEntries[i].lvl, hook.Entries[i].Level, test.desc) + t.Run(test.desc, func(t *testing.T) { + if test.expectedPanic { + if !assert.Panics(t, func() { newAerospikeBackend(test.newClientFunc, test.inCfg, nil) }, "Aerospike library's NewClientWithPolicyAndHost() should have thrown an error and didn't, hence the panic didn't happen") { + return + } + } else { + if !assert.NotPanics(t, func() { newAerospikeBackend(test.newClientFunc, test.inCfg, nil) }, "Aerospike library's NewClientWithPolicyAndHost() should have thrown an error and didn't, hence the panic didn't happen") { + return + } } - } - //Reset log after every test and assert successful reset - hook.Reset() - assert.Nil(t, hook.LastEntry()) + if assert.Len(t, hook.Entries, len(test.expectedLogEntries), test.desc) { + for i := 0; i < len(test.expectedLogEntries); i++ { + assert.Equal(t, test.expectedLogEntries[i].lvl, hook.Entries[i].Level, test.desc) + assert.Equal(t, test.expectedLogEntries[i].msg, hook.Entries[i].Message, test.desc) + } + } + + //Reset log after every test and assert successful reset + hook.Reset() + 
assert.Nil(t, hook.LastEntry()) + }) + } } @@ -123,17 +351,17 @@ func TestClassifyAerospikeError(t *testing.T) { }, { desc: "Aerospike error is neither KEY_NOT_FOUND_ERROR nor KEY_EXISTS_ERROR, expect same error as output", - inErr: as_types.NewAerospikeError(as_types.SERVER_NOT_AVAILABLE), - expectedErr: as_types.NewAerospikeError(as_types.SERVER_NOT_AVAILABLE), + inErr: &as.AerospikeError{ResultCode: as_types.SERVER_NOT_AVAILABLE}, + expectedErr: &as.AerospikeError{ResultCode: as_types.SERVER_NOT_AVAILABLE}, }, { desc: "Aerospike KEY_NOT_FOUND_ERROR error, expect Prebid Cache's KEY_NOT_FOUND error", - inErr: as_types.NewAerospikeError(as_types.KEY_NOT_FOUND_ERROR), + inErr: &as.AerospikeError{ResultCode: as_types.KEY_NOT_FOUND_ERROR}, expectedErr: utils.NewPBCError(utils.KEY_NOT_FOUND), }, { desc: "Aerospike KEY_EXISTS_ERROR error, expect Prebid Cache's RECORD_EXISTS error", - inErr: as_types.NewAerospikeError(as_types.KEY_EXISTS_ERROR), + inErr: &as.AerospikeError{ResultCode: as_types.KEY_EXISTS_ERROR}, expectedErr: utils.NewPBCError(utils.RECORD_EXISTS), }, } @@ -166,42 +394,38 @@ func TestAerospikeClientGet(t *testing.T) { }{ { desc: "AerospikeBackend.Get() throws error when trying to generate new key", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_KEY_GEN_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_KEY_GEN_ERROR"}, expectedValue: "", - expectedErrorMsg: "Not authenticated", + expectedErrorMsg: "ResultCode: NOT_AUTHENTICATED, Iteration: 0, InDoubt: false, Node: : ", }, { desc: "AerospikeBackend.Get() throws error when 'client.Get(..)' gets called", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_GET_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_GET_ERROR"}, expectedValue: "", expectedErrorMsg: "Key not found", }, { desc: "AerospikeBackend.Get() throws error when 'client.Get(..)' returns a nil record", - inAerospikeClient: 
&errorProneAerospikeClient{errorThrowingFunction: "TEST_NIL_RECORD_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_NIL_RECORD_ERROR"}, expectedValue: "", expectedErrorMsg: "Nil record", }, { desc: "AerospikeBackend.Get() throws error no BIN_VALUE bucket is found", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_NO_BUCKET_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_NO_BUCKET_ERROR"}, expectedValue: "", expectedErrorMsg: "No 'value' bucket found", }, { desc: "AerospikeBackend.Get() returns a record that does not store a string", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_NON_STRING_VALUE_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_NON_STRING_VALUE_ERROR"}, expectedValue: "", expectedErrorMsg: "Unexpected non-string value found", }, { desc: "AerospikeBackend.Get() does not throw error", - inAerospikeClient: &goodAerospikeClient{ - records: map[string]*as.Record{ - "defaultKey": { - Bins: as.BinMap{binValue: "Default value"}, - }, - }, + inAerospikeClient: &GoodAerospikeClient{ + StoredData: map[string]string{"defaultKey": "Default value"}, }, expectedValue: "Default value", expectedErrorMsg: "", @@ -247,15 +471,15 @@ func TestClientPut(t *testing.T) { }{ { desc: "AerospikeBackend.Put() throws error when trying to generate new key", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_KEY_GEN_ERROR"}, + inAerospikeClient: &ErrorProneAerospikeClient{ServerError: "TEST_KEY_GEN_ERROR"}, inKey: "testKey", inValueToStore: "not default value", expectedStoredVal: "", - expectedErrorMsg: "Not authenticated", + expectedErrorMsg: "ResultCode: NOT_AUTHENTICATED, Iteration: 0, InDoubt: false, Node: : ", }, { desc: "AerospikeBackend.Put() throws error when 'client.Put(..)' gets called", - inAerospikeClient: &errorProneAerospikeClient{errorThrowingFunction: "TEST_PUT_ERROR"}, + inAerospikeClient: 
&ErrorProneAerospikeClient{ServerError: "TEST_PUT_ERROR"}, inKey: "testKey", inValueToStore: "not default value", expectedStoredVal: "", @@ -263,12 +487,8 @@ func TestClientPut(t *testing.T) { }, { desc: "AerospikeBackend.Put() does not throw error", - inAerospikeClient: &goodAerospikeClient{ - records: map[string]*as.Record{ - "defaultKey": { - Bins: as.BinMap{binValue: "Default value"}, - }, - }, + inAerospikeClient: &GoodAerospikeClient{ + StoredData: map[string]string{"defaultKey": "Default value"}, }, inKey: "testKey", inValueToStore: "any value", @@ -298,64 +518,3 @@ func TestClientPut(t *testing.T) { } } } - -// Aerospike client that always throws an error -type errorProneAerospikeClient struct { - errorThrowingFunction string -} - -func (c *errorProneAerospikeClient) NewUUIDKey(namespace string, key string) (*as.Key, error) { - if c.errorThrowingFunction == "TEST_KEY_GEN_ERROR" { - return nil, as_types.NewAerospikeError(as_types.NOT_AUTHENTICATED) - } - return nil, nil -} - -func (c *errorProneAerospikeClient) Get(key *as.Key) (*as.Record, error) { - if c.errorThrowingFunction == "TEST_GET_ERROR" { - return nil, as_types.NewAerospikeError(as_types.KEY_NOT_FOUND_ERROR) - } else if c.errorThrowingFunction == "TEST_NO_BUCKET_ERROR" { - return &as.Record{Bins: as.BinMap{"AnyKey": "any_value"}}, nil - } else if c.errorThrowingFunction == "TEST_NON_STRING_VALUE_ERROR" { - return &as.Record{Bins: as.BinMap{binValue: 0.0}}, nil - } - return nil, nil -} - -func (c *errorProneAerospikeClient) Put(policy *as.WritePolicy, key *as.Key, binMap as.BinMap) error { - if c.errorThrowingFunction == "TEST_PUT_ERROR" { - return as_types.NewAerospikeError(as_types.KEY_EXISTS_ERROR) - } - return nil -} - -// Aerospike client that does not throw errors -type goodAerospikeClient struct { - records map[string]*as.Record -} - -func (c *goodAerospikeClient) Get(aeKey *as.Key) (*as.Record, error) { - if aeKey != nil && aeKey.Value() != nil { - key := aeKey.Value().String() - - if rec, 
found := c.records[key]; found { - return rec, nil - } - } - return nil, as_types.NewAerospikeError(as_types.KEY_NOT_FOUND_ERROR) -} - -func (c *goodAerospikeClient) Put(policy *as.WritePolicy, aeKey *as.Key, binMap as.BinMap) error { - if aeKey != nil && aeKey.Value() != nil { - key := aeKey.Value().String() - c.records[key] = &as.Record{ - Bins: binMap, - } - return nil - } - return as_types.NewAerospikeError(as_types.KEY_MISMATCH) -} - -func (c *goodAerospikeClient) NewUUIDKey(namespace string, key string) (*as.Key, error) { - return as.NewKey(namespace, setName, key) -} diff --git a/backends/cassandra_test.go b/backends/cassandra_test.go index 0a86b2ef..6b565927 100644 --- a/backends/cassandra_test.go +++ b/backends/cassandra_test.go @@ -31,7 +31,7 @@ func TestCassandraClientGet(t *testing.T) { { "CassandraBackend.Get() throws a Cassandra ErrNotFound error", testInput{ - &errorProneCassandraClient{err: gocql.ErrNotFound}, + &ErrorProneCassandraClient{ServerError: gocql.ErrNotFound}, "someKeyThatWontBeFound", }, testExpectedValues{ @@ -42,7 +42,7 @@ func TestCassandraClientGet(t *testing.T) { { "CassandraBackend.Get() throws an error different from Cassandra ErrNotFound error", testInput{ - &errorProneCassandraClient{err: errors.New("some other get error")}, + &ErrorProneCassandraClient{ServerError: errors.New("some other get error")}, "someKey", }, testExpectedValues{ @@ -53,7 +53,9 @@ func TestCassandraClientGet(t *testing.T) { { "CassandraBackend.Get() doesn't throw an error", testInput{ - &goodCassandraClient{key: "defaultKey", value: "aValue"}, + &GoodCassandraClient{ + StoredData: map[string]string{"defaultKey": "aValue"}, + }, "defaultKey", }, testExpectedValues{ @@ -100,7 +102,7 @@ func TestCassandraClientPut(t *testing.T) { { "CassandraBackend.Put() didn't store the value under the corresponding key. 
Because the 'applied' return value was false, expect a RECORD_EXISTS error", testInput{ - cassandraClient: &errorProneCassandraClient{applied: false}, + cassandraClient: &ErrorProneCassandraClient{Applied: false}, key: "someKey", valueToStore: "someValue", ttl: 10, @@ -113,7 +115,7 @@ func TestCassandraClientPut(t *testing.T) { { "CassandraBackend.Put() returns the 'applied' boolean value as 'true' in addition to a Cassandra server error. Not even sure if this scenario is feasible in practice", testInput{ - cassandraClient: &errorProneCassandraClient{applied: true, err: gocql.ErrNoConnections}, + cassandraClient: &ErrorProneCassandraClient{Applied: true, ServerError: gocql.ErrNoConnections}, key: "someKey", valueToStore: "someValue", ttl: 10, @@ -126,7 +128,7 @@ func TestCassandraClientPut(t *testing.T) { { "CassandraBackend.Put() gets called with zero ttlSeconds, value gets successfully set anyways", testInput{ - cassandraClient: &goodCassandraClient{key: "defaultKey", value: "aValue"}, + cassandraClient: &GoodCassandraClient{StoredData: map[string]string{"defaultKey": "aValue"}}, key: "defaultKey", valueToStore: "aValue", ttl: 0, @@ -139,7 +141,7 @@ func TestCassandraClientPut(t *testing.T) { { "CassandraBackend.Put() successful, no need to set defaultTTL because ttl is greater than zero", testInput{ - cassandraClient: &goodCassandraClient{key: "defaultKey", value: "aValue"}, + cassandraClient: &GoodCassandraClient{StoredData: map[string]string{"defaultKey": "aValue"}}, key: "defaultKey", valueToStore: "aValue", ttl: 1, @@ -169,47 +171,3 @@ func TestCassandraClientPut(t *testing.T) { } } } - -// Cassandra client that always throws an error -type errorProneCassandraClient struct { - applied bool - err error -} - -func (ec *errorProneCassandraClient) Init() error { - return errors.New("init error") -} - -func (ec *errorProneCassandraClient) Get(ctx context.Context, key string) (string, error) { - return "", ec.err -} - -func (ec *errorProneCassandraClient) Put(ctx 
context.Context, key string, value string, ttlSeconds int) (bool, error) { - return ec.applied, ec.err -} - -// Cassandra client client that does not throw errors -type goodCassandraClient struct { - key string - value string -} - -func (gc *goodCassandraClient) Init() error { - return nil -} - -func (gc *goodCassandraClient) Get(ctx context.Context, key string) (string, error) { - if key == gc.key { - return gc.value, nil - } - return "", utils.NewPBCError(utils.KEY_NOT_FOUND) -} - -func (gc *goodCassandraClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { - if gc.key != key { - gc.key = key - } - gc.value = value - - return true, nil -} diff --git a/backends/config/config.go b/backends/config/config.go index 7d9cf47d..22c9c010 100644 --- a/backends/config/config.go +++ b/backends/config/config.go @@ -16,6 +16,12 @@ import ( func NewBackend(cfg config.Configuration, appMetrics *metrics.Metrics) backends.Backend { backend := newBaseBackend(cfg.Backend, appMetrics) + backend = DecorateBackend(cfg, appMetrics, backend) + + return backend +} + +func DecorateBackend(cfg config.Configuration, appMetrics *metrics.Metrics, backend backends.Backend) backends.Backend { backend = applyCompression(cfg.Compression, backend) if cfg.RequestLimits.MaxSize > 0 { backend = decorators.EnforceSizeLimit(backend, cfg.RequestLimits.MaxSize) @@ -25,6 +31,7 @@ func NewBackend(cfg config.Configuration, appMetrics *metrics.Metrics) backends. // We should re-work this strategy at some point. 
backend = decorators.LogMetrics(backend, appMetrics) backend = decorators.LimitTTLs(backend, getMaxTTLSeconds(cfg)) + return backend } @@ -56,6 +63,8 @@ func newBaseBackend(cfg config.Backend, appMetrics *metrics.Metrics) backends.Ba return backends.NewAerospikeBackend(cfg.Aerospike, appMetrics) case config.BackendRedis: return backends.NewRedisBackend(cfg.Redis, ctx) + case config.BackendIgnite: + return backends.NewIgniteBackend(cfg.Ignite) default: log.Fatalf("Unknown backend type: %s", cfg.Type) } @@ -84,8 +93,8 @@ func getMaxTTLSeconds(cfg config.Configuration) int { case config.BackendAerospike: // If both config.request_limits.max_ttl_seconds and config.backend.aerospike.default_ttl_seconds // were defined, the smallest value takes preference - if cfg.Backend.Aerospike.DefaultTTL > 0 && maxTTLSeconds > cfg.Backend.Aerospike.DefaultTTL { - maxTTLSeconds = cfg.Backend.Aerospike.DefaultTTL + if cfg.Backend.Aerospike.DefaultTTLSecs > 0 && maxTTLSeconds > cfg.Backend.Aerospike.DefaultTTLSecs { + maxTTLSeconds = cfg.Backend.Aerospike.DefaultTTLSecs } case config.BackendRedis: // If both config.request_limits.max_ttl_seconds and backend.redis.expiration diff --git a/backends/config/config_test.go b/backends/config/config_test.go index bab2f919..220b934f 100644 --- a/backends/config/config_test.go +++ b/backends/config/config_test.go @@ -121,24 +121,24 @@ func TestNewBaseBackend(t *testing.T) { testCases := []struct { desc string inConfig config.Backend + inExpectPanic bool expectedBackendType backends.Backend expectedLogEntries []logEntry }{ { - desc: "unknown", - inConfig: config.Backend{Type: "unknown"}, + desc: "unknown", + inConfig: config.Backend{Type: "unknown"}, + inExpectPanic: true, expectedLogEntries: []logEntry{ {msg: "Unknown backend type: unknown", lvl: logrus.FatalLevel}, }, }, { - desc: "Cassandra", - inConfig: config.Backend{Type: config.BackendCassandra}, + desc: "Cassandra", + inConfig: config.Backend{Type: config.BackendCassandra}, + 
inExpectPanic: true, expectedLogEntries: []logEntry{ - { - msg: "Error creating Cassandra backend: ", - lvl: logrus.FatalLevel, - }, + {msg: "Error creating Cassandra backend: ", lvl: logrus.FatalLevel}, }, }, { @@ -149,27 +149,46 @@ func TestNewBaseBackend(t *testing.T) { }, }, { - desc: "Redis", - inConfig: config.Backend{Type: config.BackendRedis}, + desc: "Redis", + inConfig: config.Backend{Type: config.BackendRedis}, + inExpectPanic: true, expectedLogEntries: []logEntry{ {msg: "Error creating Redis backend: ", lvl: logrus.FatalLevel}, }, }, + { + desc: "Ignite", + inConfig: config.Backend{Type: config.BackendIgnite}, + inExpectPanic: true, + expectedLogEntries: []logEntry{ + { + msg: "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name", + lvl: logrus.FatalLevel, + }, + }, + }, } for _, tc := range testCases { mockMetrics := metricstest.CreateMockMetrics() m := &metrics.Metrics{ - MetricEngines: []metrics.CacheMetrics{ - &mockMetrics, - }, + MetricEngines: []metrics.CacheMetrics{&mockMetrics}, } - // run and assert it panics + // run panicTestFunction := func() { newBaseBackend(tc.inConfig, m) } - assert.Panics(t, panicTestFunction, "%s backend initialized in this test should error and panic.", tc.desc) + + if tc.inExpectPanic { + if !assert.Panics(t, panicTestFunction, "%s backend initialized in this test should error and panic.", tc.desc) { + continue + } + } else { + if !assert.NotPanics(t, panicTestFunction, "%s backend initialized in this test should not panic.", tc.desc) { + continue + } + } // assertions assert.Len(t, hook.Entries, len(tc.expectedLogEntries), tc.desc) @@ -233,7 +252,7 @@ func TestGetMaxTTLSeconds(t *testing.T) { Backend: config.Backend{ Type: config.BackendAerospike, Aerospike: config.Aerospike{ - DefaultTTL: 0, + DefaultTTLSecs: 0, }, }, RequestLimits: config.RequestLimits{ @@ -248,7 +267,7 @@ func TestGetMaxTTLSeconds(t *testing.T) { Backend: config.Backend{ Type: 
config.BackendAerospike, Aerospike: config.Aerospike{ - DefaultTTL: 100, + DefaultTTLSecs: 100, }, }, RequestLimits: config.RequestLimits{ @@ -263,7 +282,7 @@ func TestGetMaxTTLSeconds(t *testing.T) { Backend: config.Backend{ Type: config.BackendAerospike, Aerospike: config.Aerospike{ - DefaultTTL: 1, + DefaultTTLSecs: 1, }, }, RequestLimits: config.RequestLimits{ diff --git a/backends/ignite.go b/backends/ignite.go new file mode 100644 index 00000000..7c13dde3 --- /dev/null +++ b/backends/ignite.go @@ -0,0 +1,256 @@ +package backends + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/prebid/prebid-cache/config" + "github.com/prebid/prebid-cache/utils" + log "github.com/sirupsen/logrus" +) + +// IgniteBackend implements Backend interface and communicates with the Apache Ignite storage +// via its REST API as documented in https://ignite.apache.org/docs/2.11.1/restapi#rest-api-reference +type IgniteBackend struct { + sender requestSender + serverURL *url.URL + headers http.Header + cacheName string +} + +// httpClientWrapper lets us mock the http.Client +type httpClientWrapper interface { + Do(req *http.Request) (*http.Response, error) +} + +// requestSender defines a DoRequest method that will let us send the request to the Ignite server +// and handle its response and error. Other implementations of it will let us mock error scenarios. 
+type requestSender interface { + DoRequest(ctx context.Context, url *url.URL, headers http.Header) ([]byte, error) +} + +// igniteSender implements the requestSender interface +type igniteSender struct { + httpClient httpClientWrapper +} + +// DoRequest will hit the Ignite server specified in the url parameter and handle error responses +func (c *igniteSender) DoRequest(ctx context.Context, url *url.URL, headers http.Header) ([]byte, error) { + httpReq, err := http.NewRequestWithContext(ctx, "GET", url.String(), nil) + if err != nil { + return nil, err + } + + if len(headers) > 0 { + httpReq.Header = headers + } + + httpResp, httpErr := c.httpClient.Do(httpReq) + if httpErr != nil { + return nil, httpErr + } + + if httpResp.StatusCode != http.StatusOK { + httpErr = fmt.Errorf("Ignite error. Unexpected status code: %d", httpResp.StatusCode) + } + + if httpResp.Body == nil { + errMsg := "Received empty httpResp.Body" + if httpErr == nil { + return nil, fmt.Errorf("Ignite error. %s", errMsg) + } + return nil, fmt.Errorf("%s; %s", httpErr.Error(), errMsg) + } + defer httpResp.Body.Close() + + responseBody, ioErr := io.ReadAll(httpResp.Body) + if ioErr != nil { + errMsg := fmt.Sprintf("IO reader error: %s", ioErr) + if httpErr == nil { + return nil, fmt.Errorf("Ignite error. 
%s", errMsg) + } + return nil, fmt.Errorf("%s; %s", httpErr.Error(), errMsg) + } + + return responseBody, httpErr +} + +// NewIgniteBackend expects a valid config.Ignite object and will create an Apache Ignite cache in the +// Ignite server if the config.Ignite.Cache.CreateOnStart flag is set to true +func NewIgniteBackend(cfg config.Ignite) *IgniteBackend { + if len(cfg.Scheme) == 0 || len(cfg.Host) == 0 || cfg.Port == 0 || len(cfg.Cache.Name) == 0 { + errMsg := "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name" + log.Fatalf(errMsg) + panic(errMsg) + } + + url, err := url.Parse(fmt.Sprintf("%s://%s:%d/ignite?cacheName=%s", cfg.Scheme, cfg.Host, cfg.Port, cfg.Cache.Name)) + if err != nil { + errMsg := fmt.Sprintf("Error creating Ignite backend: error parsing Ignite host URL %s", err.Error()) + log.Fatalf(errMsg) + panic(errMsg) + } + + igb := &IgniteBackend{serverURL: url} + if cfg.VerifyCert { + igb.sender = &igniteSender{ + httpClient: http.DefaultClient, + } + } else { + igb.sender = &igniteSender{ + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + }, + } + } + + if len(cfg.Headers) > 0 { + igb.headers = http.Header{} + for k, v := range cfg.Headers { + igb.headers.Add(k, v) + } + } + + if cfg.Cache.CreateOnStart { + igb.cacheName = cfg.Cache.Name + if err := createCache(igb); err != nil { + errMsg := fmt.Sprintf("Error creating Ignite backend: %s", err.Error()) + log.Fatalf(errMsg) + panic(errMsg) + } + } + log.Infof("Prebid Cache will write to Ignite cache name: %s", cfg.Cache.Name) + + return igb +} + +// createCache uses the Apache Ignite REST API "getorcreate" command to create a cache +func createCache(igb *IgniteBackend) error { + + urlCopy := *igb.serverURL + q := urlCopy.Query() + q.Set("cmd", "getorcreate") + q.Set("cacheName", igb.cacheName) + urlCopy.RawQuery = q.Encode() + + responseBytes, err := 
igb.sender.DoRequest(context.Background(), &urlCopy, nil) + if err != nil { + return err + } + + igniteResponse := getResponse{} + + if unmarshalErr := json.Unmarshal(responseBytes, &igniteResponse); unmarshalErr != nil { + return fmt.Errorf("Unmarshal response error: %s; Response body: %s", unmarshalErr.Error(), string(responseBytes)) + } + + if len(igniteResponse.Error) > 0 { + return fmt.Errorf("Ignite error. %s", igniteResponse.Error) + } + if igniteResponse.Status > 0 { + return fmt.Errorf("Ignite error. successStatus does not equal 0 %v", igniteResponse) + } + + return nil +} + +// getResponse is used to unmarshal the Ignite server's response to a GET request with +// the "cmd" URL query field set to "get" +type getResponse struct { + Error string `json:"error"` + Response string `json:"response"` + Status int `json:"successStatus"` +} + +// Get implements the Backend interface. Makes the Ignite storage client retrieve the value that has +// been previously stored under 'key' if its TTL is still current. We can tell when a key is not found +// when Ignite doesn't return an error, nor a 'Status' different than zero, but the 'Response' field is +// empty. 
Get can also return Ignite server-side errors +func (ig *IgniteBackend) Get(ctx context.Context, key string) (string, error) { + urlCopy := *ig.serverURL + q := urlCopy.Query() + q.Set("cmd", "get") + q.Set("key", key) + + urlCopy.RawQuery = q.Encode() + + responseBytes, err := ig.sender.DoRequest(ctx, &urlCopy, ig.headers) + if err != nil { + return "", err + } + + // Unmarshal response + igniteResponse := getResponse{} + + if unmarshalErr := json.Unmarshal(responseBytes, &igniteResponse); unmarshalErr != nil { + return "", utils.NewPBCError(utils.GET_INTERNAL_SERVER, fmt.Sprintf("Ignite response unmarshal error: %s; Response body: %s", unmarshalErr.Error(), string(responseBytes))) + } + + // Validate response + if len(igniteResponse.Error) > 0 { + return "", utils.NewPBCError(utils.GET_INTERNAL_SERVER, igniteResponse.Error) + } else if igniteResponse.Status > 0 { + return "", utils.NewPBCError(utils.GET_INTERNAL_SERVER, "Ignite response. Status not zero") + } else if len(igniteResponse.Response) == 0 { + return "", utils.NewPBCError(utils.KEY_NOT_FOUND) + } + + return igniteResponse.Response, nil +} + +// putResponse is used to unmarshal the Ignite server's response to a PUT request with +// the "cmd" URL query field set to "putifabs" +type putResponse struct { + Error string `json:"error"` + Response bool `json:"response"` + Status int `json:"successStatus"` +} + +// Put implements the Backend interface to communicate with the Ignite storage service to perform +// a "putifabs" command in order to store the "value" parameter only if the "key" doesn't exist in +// the storage already. 
Returns RecordExistsError or whatever PUT_INTERNAL_SERVER error we might +// find in the storage side +func (ig *IgniteBackend) Put(ctx context.Context, key string, value string, ttlSeconds int) error { + + urlCopy := *ig.serverURL + q := urlCopy.Query() + q.Set("cmd", "putifabs") + q.Set("key", key) + q.Set("val", value) + q.Set("exp", fmt.Sprintf("%d", ttlSeconds*1000)) + + urlCopy.RawQuery = q.Encode() + + responseBytes, err := ig.sender.DoRequest(ctx, &urlCopy, ig.headers) + if err != nil { + return err + } + + // Unmarshal response + igniteResponse := putResponse{} + if unmarshalErr := json.Unmarshal(responseBytes, &igniteResponse); unmarshalErr != nil { + return fmt.Errorf("Unmarshal response error: %s; Response body: %s", unmarshalErr.Error(), string(responseBytes)) + } + + // Validate response + if len(igniteResponse.Error) > 0 { + return utils.NewPBCError(utils.PUT_INTERNAL_SERVER, igniteResponse.Error) + } + + if igniteResponse.Status > 0 { + return utils.NewPBCError(utils.PUT_INTERNAL_SERVER, "Ignite responded with non-zero successStatus code") + } + + if !igniteResponse.Response { + return utils.NewPBCError(utils.RECORD_EXISTS) + } + + return nil +} diff --git a/backends/ignite_test.go b/backends/ignite_test.go new file mode 100644 index 00000000..2c8f2f64 --- /dev/null +++ b/backends/ignite_test.go @@ -0,0 +1,668 @@ +package backends + +import ( + "context" + "crypto/tls" + "errors" + "io" + "net/http" + "net/url" + "testing" + + "github.com/prebid/prebid-cache/config" + "github.com/prebid/prebid-cache/utils" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" +) + +func TestDoRequest(t *testing.T) { + type testInput struct { + ctx context.Context + headers http.Header + httpResp *http.Response + httpErr error + } + + type testOutput struct { + resp []byte + err error + } + + testCases := []struct { + desc string + in testInput + expected testOutput + }{ + { + desc: "http.NewRequestWithContext 
returns error because nil Context", + expected: testOutput{ + resp: nil, + err: errors.New("net/http: nil Context"), + }, + }, + { + desc: "http.Client.Do() returns an error", + in: testInput{ + ctx: context.TODO(), + httpResp: nil, + httpErr: &url.Error{ + Op: "GET", + Err: errors.New("fake http.Client error"), + }, + }, + expected: testOutput{ + resp: nil, + err: &url.Error{Op: "GET", Err: errors.New("fake http.Client error")}, + }, + }, + { + desc: "nil http.Response.Body is returned", + in: testInput{ + ctx: context.TODO(), + httpResp: &http.Response{ + StatusCode: http.StatusOK, + Body: nil, + }, + httpErr: nil, + }, + expected: testOutput{ + resp: nil, + err: errors.New("Ignite error. Received empty httpResp.Body"), + }, + }, + { + desc: "Non 200 status code is returned in the http.Response", + in: testInput{ + ctx: context.TODO(), + httpResp: &http.Response{ + StatusCode: http.StatusNotFound, + Body: fakeReadCloser{ + body: []byte{0x0}, + err: io.EOF, + }, + }, + httpErr: nil, + }, + expected: testOutput{ + resp: []byte{0x0}, + err: errors.New("Ignite error. Unexpected status code: 404"), + }, + }, + { + desc: "http.Response.Body read error", + in: testInput{ + ctx: context.TODO(), + httpResp: &http.Response{ + StatusCode: http.StatusOK, + Body: fakeReadCloser{ + body: []byte{0x0}, + err: io.ErrShortBuffer, + }, + }, + httpErr: nil, + }, + expected: testOutput{ + resp: nil, + err: errors.New("Ignite error. 
IO reader error: short buffer"), + }, + }, + { + desc: "Success", + in: testInput{ + ctx: context.TODO(), + headers: http.Header{ + "HEADER": []string{"value"}, + }, + httpResp: &http.Response{ + StatusCode: http.StatusOK, + Body: fakeReadCloser{ + body: []byte(`{"jsonObject":"value"}`), + err: io.EOF, + }, + }, + httpErr: nil, + }, + expected: testOutput{ + resp: []byte(`{"jsonObject":"value"}`), + err: nil, + }, + }, + } + for _, tc := range testCases { + fakeIgniteClient := &igniteSender{ + httpClient: &fakeHttpClient{ + mockFunction: func() (*http.Response, error) { + return tc.in.httpResp, tc.in.httpErr + }, + }, + } + actualResp, actualErr := fakeIgniteClient.DoRequest(tc.in.ctx, &url.URL{}, tc.in.headers) + + assert.Equal(t, tc.expected.resp, actualResp, tc.desc) + assert.Equal(t, tc.expected.err, actualErr, tc.desc) + } +} + +func TestNewIgniteBackend(t *testing.T) { + type logEntry struct { + msg string + lvl logrus.Level + } + + type testOut struct { + backend *IgniteBackend + panicHappens bool + logEntries []logEntry + } + + type testCase struct { + desc string + in config.Ignite + expected testOut + } + testGroups := []struct { + desc string + testCases []testCase + }{ + { + desc: "config validation error", + testCases: []testCase{ + { + desc: "empty scheme", + in: config.Ignite{ + Scheme: "", + Host: "127.0.0.1", + Port: 8080, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + CreateOnStart: false, + }, + }, + expected: testOut{ + backend: nil, + panicHappens: true, + logEntries: []logEntry{ + { + msg: "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name", + lvl: logrus.FatalLevel, + }, + }, + }, + }, + { + desc: "empty host", + in: config.Ignite{ + Scheme: "http", + Host: "", + Port: 8080, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + CreateOnStart: 
false, + }, + }, + expected: testOut{ + backend: nil, + panicHappens: true, + logEntries: []logEntry{ + { + msg: "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name", + lvl: logrus.FatalLevel, + }, + }, + }, + }, + { + desc: "empty port", + in: config.Ignite{ + Scheme: "http", + Host: "127.0.0.1", + Port: 0, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + CreateOnStart: false, + }, + }, + expected: testOut{ + backend: nil, + panicHappens: true, + logEntries: []logEntry{ + { + msg: "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name", + lvl: logrus.FatalLevel, + }, + }, + }, + }, + { + desc: "No cache name", + in: config.Ignite{ + Scheme: "http", + Host: "127.0.0.1", + Port: 8080, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{}, + }, + expected: testOut{ + backend: nil, + panicHappens: true, + logEntries: []logEntry{ + { + msg: "Error creating Ignite backend: configuration is missing ignite.schema, ignite.host, ignite.port or ignite.cache.name", + lvl: logrus.FatalLevel, + }, + }, + }, + }, + }, + }, + { + desc: "parse URL error", + testCases: []testCase{ + { + desc: "Non-empty scheme holds an invalid value", + in: config.Ignite{ + Scheme: ":invalid:", + Host: "127.0.0.1", + Port: 8080, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + }, + }, + expected: testOut{ + backend: nil, + panicHappens: true, + logEntries: []logEntry{ + { + msg: "Error creating Ignite backend: error parsing Ignite host URL parse \":invalid:://127.0.0.1:8080/ignite?cacheName=myCache\": missing protocol scheme", + lvl: logrus.FatalLevel, + }, + }, + }, + }, + }, + }, + { + desc: "Non error", + testCases: []testCase{ + { + desc: "Expect validation to pass and a default client 
with secure http transport", + in: config.Ignite{ + Scheme: "http", + Host: "127.0.0.1", + Port: 8080, + VerifyCert: true, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + CreateOnStart: false, + }, + }, + expected: testOut{ + backend: &IgniteBackend{ + serverURL: &url.URL{ + Scheme: "http", + Host: "127.0.0.1:8080", + Path: "/ignite", + RawQuery: "cacheName=myCache", + }, + sender: &igniteSender{httpClient: http.DefaultClient}, + headers: http.Header{"Header": []string{"Value"}}, + }, + panicHappens: false, + logEntries: []logEntry{ + { + msg: "Prebid Cache will write to Ignite cache name: myCache", + lvl: logrus.InfoLevel, + }, + }, + }, + }, + { + desc: "Expect validation to pass but with Secure is set to false. Expect client with insecure http transport", + in: config.Ignite{ + Scheme: "http", + Host: "127.0.0.1", + Port: 8080, + VerifyCert: false, + Headers: map[string]string{"Header": "Value"}, + Cache: config.IgniteCache{ + Name: "myCache", + CreateOnStart: false, + }, + }, + expected: testOut{ + backend: &IgniteBackend{ + serverURL: &url.URL{ + Scheme: "http", + Host: "127.0.0.1:8080", + Path: "/ignite", + RawQuery: "cacheName=myCache", + }, + sender: &igniteSender{ + httpClient: &http.Client{ + Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, + }, + }, + headers: http.Header{"Header": []string{"Value"}}, + }, + panicHappens: false, + logEntries: []logEntry{ + { + msg: "Prebid Cache will write to Ignite cache name: myCache", + lvl: logrus.InfoLevel, + }, + }, + }, + }, + }, + }, + } + + // logrus entries will be recorded to this `hook` object so we can compare and assert them + hook := test.NewGlobal() + + //substitute logger exit function so execution doesn't get interrupted when log.Fatalf() call comes + defer func() { logrus.StandardLogger().ExitFunc = nil }() + logrus.StandardLogger().ExitFunc = func(int) {} + + for _, group := range testGroups { + for _, tc := range 
group.testCases { + + var resultingBackend *IgniteBackend + if tc.expected.panicHappens { + assert.Panics(t, func() { resultingBackend = NewIgniteBackend(tc.in) }, "NewIgniteBackend() should have panicked and it didn't happen") + } else { + resultingBackend = NewIgniteBackend(tc.in) + } + if assert.Len(t, hook.Entries, len(tc.expected.logEntries), "%s - %s", group.desc, tc.desc) { + for i := 0; i < len(tc.expected.logEntries); i++ { + assert.Equalf(t, tc.expected.logEntries[i].msg, hook.Entries[i].Message, "%s - %s", group.desc, tc.desc) + assert.Equalf(t, tc.expected.logEntries[i].lvl, hook.Entries[i].Level, "%s - %s", group.desc, tc.desc) + } + } + + assert.Equalf(t, tc.expected.backend, resultingBackend, "%s - %s", group.desc, tc.desc) + + //Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry()) + } + } +} + +type fakeIgniteClient struct { + respond func() ([]byte, error) +} + +func (c *fakeIgniteClient) DoRequest(ctx context.Context, url *url.URL, headers http.Header) ([]byte, error) { + return c.respond() +} + +func TestIgniteGet(t *testing.T) { + type testInput struct { + igniteResponse []byte + igniteError error + } + + type testOutput struct { + value string + err error + } + + testCases := []struct { + desc string + in testInput + expected testOutput + }{ + { + desc: "DoRequest call fails, expect error", + in: testInput{ + igniteResponse: nil, + igniteError: errors.New("Mock Ignite Client DoRequest() error"), + }, + expected: testOutput{ + err: errors.New("Mock Ignite Client DoRequest() error"), + }, + }, + { + desc: "DoRequest call returns malformed JSON blob", + in: testInput{ + igniteResponse: []byte(`malformed`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.GET_INTERNAL_SERVER, "Ignite response unmarshal error: invalid character 'm' looking for beginning of value; Response body: malformed"), + }, + }, + { + desc: "Ignite server responds with error message", + in: 
testInput{ + igniteResponse: []byte(`{"error":"Server side error"}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.GET_INTERNAL_SERVER, "Server side error"), + }, + }, + { + desc: "Ignite server responds with non-zero 'successStatus' value", + in: testInput{ + igniteResponse: []byte(`{"successStatus":1,"error":""}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.GET_INTERNAL_SERVER, "Ignite response. Status not zero"), + }, + }, + { + desc: "Ignite responds with 'successStatus' equal to zero and empty 'error' message, but also an empty 'response' field", + in: testInput{ + igniteResponse: []byte(`{"successStatus":0,"error":"","response":""}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.KEY_NOT_FOUND), + }, + }, + } + + for _, tc := range testCases { + back := &IgniteBackend{ + sender: &fakeIgniteClient{ + respond: func() ([]byte, error) { + return tc.in.igniteResponse, tc.in.igniteError + }, + }, + serverURL: &url.URL{}, + } + + v, err := back.Get(nil, "someKey") + + assert.Equal(t, tc.expected.value, v, tc.desc) + assert.Equal(t, tc.expected.err, err, tc.desc) + } +} + +func TestIgnitePut(t *testing.T) { + type testInput struct { + igniteResponse []byte + igniteError error + } + + type testOutput struct { + err error + } + + testCases := []struct { + desc string + in testInput + expected testOutput + }{ + { + desc: "DoRequest call fails, expect error", + in: testInput{ + igniteResponse: nil, + igniteError: errors.New("Mock Ignite Client DoRequest() error"), + }, + expected: testOutput{ + err: errors.New("Mock Ignite Client DoRequest() error"), + }, + }, + { + desc: "DoRequest call returns malformed JSON blob", + in: testInput{ + igniteResponse: []byte(`malformed`), + igniteError: nil, + }, + expected: testOutput{ + err: errors.New("Unmarshal response error: invalid character 'm' looking for beginning of value; Response body: malformed"), + }, + }, + { + desc: 
"Ignite server responds with error message", + in: testInput{ + igniteResponse: []byte(`{"error":"Server side error"}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.PUT_INTERNAL_SERVER, "Server side error"), + }, + }, + { + desc: "Ignite server responds with non-zero 'successStatus' value", + in: testInput{ + igniteResponse: []byte(`{"successStatus":1,"error":""}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.PUT_INTERNAL_SERVER, "Ignite responded with non-zero successStatus code"), + }, + }, + { + desc: "Ignite responds 'response' field set to false", + in: testInput{ + igniteResponse: []byte(`{"successStatus":0,"error":"","response":false}`), + igniteError: nil, + }, + expected: testOutput{ + err: utils.NewPBCError(utils.RECORD_EXISTS), + }, + }, + { + desc: "Successful ignite put", + in: testInput{ + igniteResponse: []byte(`{"successStatus":0,"error":"","response":true}`), + igniteError: nil, + }, + expected: testOutput{ + err: nil, + }, + }, + } + + for _, tc := range testCases { + back := &IgniteBackend{ + sender: &fakeIgniteClient{ + respond: func() ([]byte, error) { + return tc.in.igniteResponse, tc.in.igniteError + }, + }, + serverURL: &url.URL{}, + } + + err := back.Put(nil, "someKey", "someValue", 5) + + assert.Equal(t, tc.expected.err, err, tc.desc) + } +} + +func TestCreateCache(t *testing.T) { + type testInput struct { + igniteResponse []byte + igniteError error + } + + type testOutput struct { + err error + } + + testCases := []struct { + desc string + in testInput + expected testOutput + }{ + { + desc: "DoRequest call fails, expect error", + in: testInput{ + igniteResponse: nil, + igniteError: errors.New("Mock Ignite Client DoRequest() error"), + }, + expected: testOutput{ + err: errors.New("Mock Ignite Client DoRequest() error"), + }, + }, + { + desc: "DoRequest call returns malformed JSON blob", + in: testInput{ + igniteResponse: []byte(`malformed`), + igniteError: nil, + }, + 
expected: testOutput{ + err: errors.New("Unmarshal response error: invalid character 'm' looking for beginning of value; Response body: malformed"), + }, + }, + { + desc: "Ignite server responds with error message", + in: testInput{ + igniteResponse: []byte(`{"error":"Server side error"}`), + igniteError: nil, + }, + expected: testOutput{ + err: errors.New("Ignite error. Server side error"), + }, + }, + { + desc: "Ignite server responds with non-zero 'successStatus' value", + in: testInput{ + igniteResponse: []byte(`{"successStatus":1,"error":""}`), + igniteError: nil, + }, + expected: testOutput{ + err: errors.New(`Ignite error. successStatus does not equal 0 { 1}`), + }, + }, + { + desc: "Successfully created cache", + in: testInput{ + igniteResponse: []byte(`{"successStatus":0,"error":""}`), + igniteError: nil, + }, + expected: testOutput{ + err: nil, + }, + }, + } + + for _, tc := range testCases { + back := &IgniteBackend{ + sender: &fakeIgniteClient{ + respond: func() ([]byte, error) { + return tc.in.igniteResponse, tc.in.igniteError + }, + }, + serverURL: &url.URL{}, + } + + assert.Equal(t, tc.expected.err, createCache(back), tc.desc) + } +} diff --git a/backends/memcache_test.go b/backends/memcache_test.go index f3d55c40..088eaaab 100644 --- a/backends/memcache_test.go +++ b/backends/memcache_test.go @@ -34,7 +34,7 @@ func TestMemcacheGet(t *testing.T) { { "Memcache.Get() throws a memcache.ErrCacheMiss error", testInput{ - &errorProneMemcache{errorToThrow: memcache.ErrCacheMiss}, + &ErrorProneMemcache{ServerError: memcache.ErrCacheMiss}, "someKeyThatWontBeFound", }, testExpectedValues{ @@ -45,7 +45,7 @@ func TestMemcacheGet(t *testing.T) { { "Memcache.Get() throws an error different from Cassandra ErrNotFound error", testInput{ - &errorProneMemcache{errorToThrow: errors.New("some other get error")}, + &ErrorProneMemcache{ServerError: errors.New("some other get error")}, "someKey", }, testExpectedValues{ @@ -56,7 +56,7 @@ func TestMemcacheGet(t *testing.T) { 
{ "Memcache.Get() doesn't throw an error", testInput{ - &goodMemcache{key: "defaultKey", value: "aValue"}, + &GoodMemcache{StoredData: map[string]string{"defaultKey": "aValue"}}, "defaultKey", }, testExpectedValues{ @@ -101,7 +101,7 @@ func TestMemcachePut(t *testing.T) { { "Memcache.Put() throws non-ErrNotStored error", testInput{ - &errorProneMemcache{errorToThrow: memcache.ErrServerError}, + &ErrorProneMemcache{ServerError: memcache.ErrServerError}, "someKey", "someValue", 10, @@ -114,7 +114,7 @@ func TestMemcachePut(t *testing.T) { { "Memcache.Put() throws ErrNotStored error", testInput{ - &errorProneMemcache{errorToThrow: memcache.ErrNotStored}, + &ErrorProneMemcache{ServerError: memcache.ErrNotStored}, "someKey", "someValue", 10, @@ -127,7 +127,7 @@ func TestMemcachePut(t *testing.T) { { "Memcache.Put() successful", testInput{ - &goodMemcache{key: "defaultKey", value: "aValue"}, + &GoodMemcache{StoredData: map[string]string{"defaultKey": "aValue"}}, "defaultKey", "aValue", 1, @@ -227,38 +227,3 @@ func TestNewMemcacheBackend(t *testing.T) { assert.Nil(t, hook.LastEntry()) } } - -// Memcache that always throws an error -type errorProneMemcache struct { - errorToThrow error -} - -func (ec *errorProneMemcache) Get(key string) (*memcache.Item, error) { - return nil, ec.errorToThrow -} - -func (ec *errorProneMemcache) Put(key string, value string, ttlSeconds int) error { - return ec.errorToThrow -} - -// Memcache client that does not throw errors -type goodMemcache struct { - key string - value string -} - -func (gc *goodMemcache) Get(key string) (*memcache.Item, error) { - if key == gc.key { - return &memcache.Item{Key: gc.key, Value: []byte(gc.value)}, nil - } - return nil, utils.NewPBCError(utils.KEY_NOT_FOUND) -} - -func (gc *goodMemcache) Put(key string, value string, ttlSeconds int) error { - if gc.key != key { - gc.key = key - } - gc.value = value - - return nil -} diff --git a/backends/redis.go b/backends/redis.go index 14c81375..d7128555 100644 --- 
a/backends/redis.go +++ b/backends/redis.go @@ -102,8 +102,11 @@ func (b *RedisBackend) Get(ctx context.Context, key string) (string, error) { func (b *RedisBackend) Put(ctx context.Context, key string, value string, ttlSeconds int) error { success, err := b.client.Put(ctx, key, value, ttlSeconds) + if err != nil && err != redis.Nil { + return err + } if !success { return utils.NewPBCError(utils.RECORD_EXISTS) } - return err + return nil } diff --git a/backends/redis_test.go b/backends/redis_test.go index bfb4a0ac..7238ae5c 100644 --- a/backends/redis_test.go +++ b/backends/redis_test.go @@ -29,34 +29,43 @@ func TestRedisClientGet(t *testing.T) { expected testExpectedValues }{ { - "RedisBackend.Get() throws a redis.Nil error", - testInput{ - &errorProneRedisClient{success: false, errorToThrow: redis.Nil}, - "someKeyThatWontBeFound", + desc: "RedisBackend.Get() throws a redis.Nil error", + in: testInput{ + redisClient: FakeRedisClient{ + Success: false, + ServerError: redis.Nil, + }, + key: "someKeyThatWontBeFound", }, - testExpectedValues{ + expected: testExpectedValues{ value: "", err: utils.NewPBCError(utils.KEY_NOT_FOUND), }, }, { - "RedisBackend.Get() throws an error different from redis.Nil", - testInput{ - &errorProneRedisClient{success: false, errorToThrow: errors.New("some other get error")}, - "someKey", + desc: "RedisBackend.Get() throws an error different from redis.Nil", + in: testInput{ + redisClient: FakeRedisClient{ + Success: false, + ServerError: errors.New("some other get error"), + }, + key: "someKey", }, - testExpectedValues{ + expected: testExpectedValues{ value: "", err: errors.New("some other get error"), }, }, { - "RedisBackend.Get() doesn't throw an error", - testInput{ - &goodRedisClient{key: "defaultKey", value: "aValue"}, - "defaultKey", + desc: "RedisBackend.Get() doesn't throw an error", + in: testInput{ + redisClient: FakeRedisClient{ + Success: true, + StoredData: map[string]string{"defaultKey": "aValue"}, + }, + key: "defaultKey", 
}, - testExpectedValues{ + expected: testExpectedValues{ value: "aValue", err: nil, }, @@ -86,8 +95,8 @@ func TestRedisClientPut(t *testing.T) { } type testExpectedValues struct { - value string - err error + writtenValue string + redisClientErr error } testCases := []struct { @@ -96,111 +105,91 @@ func TestRedisClientPut(t *testing.T) { expected testExpectedValues }{ { - "RedisBackend.Put() tries to overwrite already existing key", - testInput{ - &errorProneRedisClient{success: false, errorToThrow: redis.Nil}, - "repeatedKey", - "overwriteValue", - 10, + desc: "Try to overwrite already existing key. From redis client documentation, SetNX returns 'false' because no operation is performed", + in: testInput{ + redisClient: FakeRedisClient{ + Success: false, + StoredData: map[string]string{"key": "original value"}, + ServerError: redis.Nil, + }, + key: "key", + valueToStore: "overwrite value", + ttl: 10, }, - testExpectedValues{ - "", - utils.NewPBCError(utils.RECORD_EXISTS), + expected: testExpectedValues{ + redisClientErr: utils.NewPBCError(utils.RECORD_EXISTS), + writtenValue: "original value", }, }, { - "RedisBackend.Put() throws an error different from error redis.Nil, which gets returned when key does not exist.", - testInput{ - &errorProneRedisClient{success: true, errorToThrow: errors.New("some other Redis error")}, - "someKey", - "someValue", - 10, + desc: "When key does not exist, redis.Nil is returned. Other errors should be interpreted as a server side error. 
Expect error.", + in: testInput{ + redisClient: FakeRedisClient{ + Success: true, + StoredData: map[string]string{}, + ServerError: errors.New("A Redis client side error"), + }, + key: "someKey", + valueToStore: "someValue", + ttl: 10, }, - testExpectedValues{ - "", - errors.New("some other Redis error"), + expected: testExpectedValues{ + redisClientErr: errors.New("A Redis client side error"), }, }, { - "RedisBackend.Put() gets called with zero ttlSeconds, value gets successfully set anyways", - testInput{ - &goodRedisClient{key: "defaultKey", value: "aValue"}, - "defaultKey", - "aValue", - 0, + desc: "In Redis, a zero ttl value means no expiration. Expect value to be successfully set", + in: testInput{ + redisClient: FakeRedisClient{ + StoredData: map[string]string{}, + Success: true, + ServerError: redis.Nil, + }, + key: "defaultKey", + valueToStore: "aValue", + ttl: 0, }, - testExpectedValues{ - "aValue", - nil, + expected: testExpectedValues{ + writtenValue: "aValue", }, }, { - "RedisBackend.Put() successful, no need to set defaultTTL because ttl is greater than zero", - testInput{ - &goodRedisClient{key: "defaultKey", value: "aValue"}, - "defaultKey", - "aValue", - 1, + desc: "RedisBackend.Put() successful, no need to set defaultTTL because ttl is greater than zero", + in: testInput{ + redisClient: FakeRedisClient{ + StoredData: map[string]string{}, + Success: true, + ServerError: redis.Nil, + }, + key: "defaultKey", + valueToStore: "aValue", + ttl: 1, }, - testExpectedValues{ - "aValue", - nil, + expected: testExpectedValues{ + writtenValue: "aValue", }, }, } for _, tt := range testCases { - // Assign redis backend cient + // Assign redis backend client redisBackend.client = tt.in.redisClient // Run test actualErr := redisBackend.Put(context.Background(), tt.in.key, tt.in.valueToStore, tt.in.ttl) - // Assert Put error - assert.Equal(t, tt.expected.err, actualErr, tt.desc) + // Assertions + assert.Equal(t, tt.expected.redisClientErr, actualErr, tt.desc) - // 
Assert value - if tt.expected.err == nil { - storedValue, getErr := redisBackend.Get(context.Background(), tt.in.key) + // Put error + assert.Equal(t, tt.expected.redisClientErr, actualErr, tt.desc) - assert.NoError(t, getErr, tt.desc) - assert.Equal(t, tt.expected.value, storedValue, tt.desc) + if actualErr == nil || actualErr == utils.NewPBCError(utils.RECORD_EXISTS) { + // Either a value was inserted successfully or the record already existed. + // Assert data in the backend + storage, ok := tt.in.redisClient.(FakeRedisClient) + assert.True(t, ok, tt.desc) + assert.Equal(t, tt.expected.writtenValue, storage.StoredData[tt.in.key], tt.desc) } } } - -// errorProneRedisClient always throws an error -type errorProneRedisClient struct { - success bool - errorToThrow error -} - -func (ec *errorProneRedisClient) Get(ctx context.Context, key string) (string, error) { - return "", ec.errorToThrow -} - -func (ec *errorProneRedisClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { - return ec.success, ec.errorToThrow -} - -// goodRedisClient does not throw errors -type goodRedisClient struct { - key string - value string -} - -func (gc *goodRedisClient) Get(ctx context.Context, key string) (string, error) { - if key == gc.key { - return gc.value, nil - } - return "", utils.NewPBCError(utils.KEY_NOT_FOUND) -} - -func (gc *goodRedisClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { - if gc.key != key { - gc.key = key - } - gc.value = value - - return true, nil -} diff --git a/backends/test_utils.go b/backends/test_utils.go new file mode 100644 index 00000000..03737ef5 --- /dev/null +++ b/backends/test_utils.go @@ -0,0 +1,288 @@ +package backends + +import ( + "context" + "errors" + "io" + "net/http" + "net/url" + + as "github.com/aerospike/aerospike-client-go/v6" + as_types "github.com/aerospike/aerospike-client-go/v6/types" + "github.com/google/gomemcache/memcache" + 
"github.com/prebid/prebid-cache/utils" +) + +// ------------------------------------------ +// Aerospike client mocks +// ------------------------------------------ +func NewMockAerospikeBackend(mockClient AerospikeDB) *AerospikeBackend { + return &AerospikeBackend{client: mockClient} +} + +type ErrorProneAerospikeClient struct { + ServerError string +} + +func (c *ErrorProneAerospikeClient) NewUUIDKey(namespace string, key string) (*as.Key, error) { + if c.ServerError == "TEST_KEY_GEN_ERROR" { + return nil, &as.AerospikeError{ResultCode: as_types.NOT_AUTHENTICATED} + } + return nil, nil +} + +func (c *ErrorProneAerospikeClient) Get(key *as.Key) (*as.Record, error) { + if c.ServerError == "TEST_GET_ERROR" { + return nil, &as.AerospikeError{ResultCode: as_types.KEY_NOT_FOUND_ERROR} + } else if c.ServerError == "TEST_NO_BUCKET_ERROR" { + return &as.Record{Bins: as.BinMap{"AnyKey": "any_value"}}, nil + } else if c.ServerError == "TEST_NON_STRING_VALUE_ERROR" { + return &as.Record{Bins: as.BinMap{binValue: 0.0}}, nil + } + return nil, nil +} + +func (c *ErrorProneAerospikeClient) Put(policy *as.WritePolicy, key *as.Key, binMap as.BinMap) error { + if c.ServerError == "TEST_PUT_ERROR" { + return &as.AerospikeError{ResultCode: as_types.KEY_EXISTS_ERROR} + } + return nil +} + +// Aerospike client that does not throw errors +type GoodAerospikeClient struct { + StoredData map[string]string +} + +func (c *GoodAerospikeClient) Get(aeKey *as.Key) (*as.Record, error) { + if aeKey != nil && aeKey.Value() != nil { + key := aeKey.Value().String() + + if value, found := c.StoredData[key]; found { + rec := &as.Record{ + Bins: as.BinMap{binValue: value}, + } + return rec, nil + } + } + return nil, &as.AerospikeError{ResultCode: as_types.KEY_NOT_FOUND_ERROR} +} + +func (c *GoodAerospikeClient) Put(policy *as.WritePolicy, aeKey *as.Key, binMap as.BinMap) error { + if aeKey != nil && aeKey.Value() != nil { + key := aeKey.Value().String() + if interfaceValue, found := binMap[binValue]; 
found { + if str, asserted := interfaceValue.(string); asserted { + c.StoredData[key] = str + } + } + + return nil + } + return &as.AerospikeError{ResultCode: as_types.KEY_MISMATCH} +} + +func (c *GoodAerospikeClient) NewUUIDKey(namespace string, key string) (*as.Key, error) { + return as.NewKey(namespace, setName, key) +} + +// ------------------------------------------ +// Cassandra client mocks +// ------------------------------------------ +func NewMockCassandraBackend(ttl int, mockClient CassandraDB) *CassandraBackend { + return &CassandraBackend{ + defaultTTL: ttl, + client: mockClient, + } +} + +type ErrorProneCassandraClient struct { + Applied bool + ServerError error +} + +func (ec *ErrorProneCassandraClient) Init() error { + return errors.New("init error") +} + +func (ec *ErrorProneCassandraClient) Get(ctx context.Context, key string) (string, error) { + return "", ec.ServerError +} + +func (ec *ErrorProneCassandraClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { + return ec.Applied, ec.ServerError +} + +// Cassandra client client that does not throw errors +type GoodCassandraClient struct { + StoredData map[string]string +} + +func (gc *GoodCassandraClient) Init() error { + return nil +} + +func (gc *GoodCassandraClient) Get(ctx context.Context, key string) (string, error) { + if value, found := gc.StoredData[key]; found { + return value, nil + } + return "", utils.NewPBCError(utils.KEY_NOT_FOUND) +} + +func (gc *GoodCassandraClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { + if _, found := gc.StoredData[key]; !found { + gc.StoredData[key] = value + } + return true, nil +} + +// ------------------------------------------ +// Memcache client mocks +// ------------------------------------------ +func NewMockMemcacheBackend(mockClient MemcacheDataStore) *MemcacheBackend { + return &MemcacheBackend{ + memcache: mockClient, + } +} + +type ErrorProneMemcache struct { + 
ServerError error +} + +func (ec *ErrorProneMemcache) Get(key string) (*memcache.Item, error) { + return nil, ec.ServerError +} + +func (ec *ErrorProneMemcache) Put(key string, value string, ttlSeconds int) error { + return ec.ServerError +} + +// Memcache client that does not throw errors +type GoodMemcache struct { + StoredData map[string]string +} + +func (gm *GoodMemcache) Get(key string) (*memcache.Item, error) { + if value, found := gm.StoredData[key]; found { + return &memcache.Item{Key: key, Value: []byte(value)}, nil + } + return nil, utils.NewPBCError(utils.KEY_NOT_FOUND) +} + +func (gm *GoodMemcache) Put(key string, value string, ttlSeconds int) error { + if _, found := gm.StoredData[key]; !found { + gm.StoredData[key] = value + } + return nil +} + +// ------------------------------------------ +// Redis client mocks +// ------------------------------------------ +func NewFakeRedisBackend(mockClient RedisDB) *RedisBackend { + return &RedisBackend{ + client: mockClient, + } +} + +type FakeRedisClient struct { + StoredData map[string]string + ServerError error + Success bool +} + +// Get returns an error if the FakeRedisClient has a non-nil ServerError field. 
+func (r FakeRedisClient) Get(ctx context.Context, key string) (string, error) { + if r.ServerError != nil { + return "", r.ServerError + } + if value, found := r.StoredData[key]; found { + return value, nil + } + return "", utils.NewPBCError(utils.KEY_NOT_FOUND) +} + +func (r FakeRedisClient) Put(ctx context.Context, key string, value string, ttlSeconds int) (bool, error) { + if _, found := r.StoredData[key]; !found { + r.StoredData[key] = value + } + return r.Success, r.ServerError +} + +// ------------------------------------------ +// Memory client mocks +// ------------------------------------------ +func NewErrorResponseMemoryBackend() *ErrorProneMemoryClient { + return &ErrorProneMemoryClient{} +} + +type ErrorProneMemoryClient struct{} + +func (ec *ErrorProneMemoryClient) Get(ctx context.Context, key string) (string, error) { + return "", errors.New("Backend error") +} + +func (ec *ErrorProneMemoryClient) Put(ctx context.Context, key string, value string, ttlSeconds int) error { + return errors.New("Backend error") +} + +// Good memory client does not throw errors +func NewMemoryBackendWithValues(customData map[string]string) (*MemoryBackend, error) { + backend := NewMemoryBackend() + + if len(customData) > 0 { + for k, v := range customData { + if err := backend.Put(context.Background(), k, v, 1); err != nil { + return backend, err + } + } + } + return backend, nil +} + +// ------------------------------------------ +// Ignite backend mocks +// ------------------------------------------ +type fakeHttpClient struct { + mockFunction func() (*http.Response, error) +} + +func (c *fakeHttpClient) Do(req *http.Request) (*http.Response, error) { + return c.mockFunction() +} + +type fakeReadCloser struct { + body []byte + err error +} + +func (rc fakeReadCloser) Read(p []byte) (n int, err error) { + copy(p, rc.body) + return len(rc.body), rc.err +} + +func (c fakeReadCloser) Close() error { return nil } + +func NewFakeIgniteBackend(fakeIgniteResponse []byte, 
fakeError error) *IgniteBackend { + return &IgniteBackend{ + sender: &igniteSender{ + httpClient: &fakeHttpClient{ + mockFunction: func() (*http.Response, error) { + httpStatus := http.StatusOK + if fakeError != nil { + httpStatus = http.StatusInternalServerError + } + httpResp := &http.Response{ + StatusCode: httpStatus, + Body: fakeReadCloser{ + body: fakeIgniteResponse, + err: io.EOF, + }, + } + return httpResp, fakeError + }, + }, + }, + serverURL: &url.URL{}, + } +} diff --git a/config.yaml b/config.yaml index d5329861..277cee95 100644 --- a/config.yaml +++ b/config.yaml @@ -10,32 +10,44 @@ request_limits: max_size_bytes: 10240 # 10K max_num_values: 10 max_ttl_seconds: 3600 +request_logging: + referer_sampling_rate: 0.0 backend: - type: "memory" # Can also be "aerospike", "cassandra", "memcache" or "redis" - aerospike: - host: "aerospike.prebid.com" - port: 3000 - namespace: "whatever" - cassandra: - hosts: "127.0.0.1" - keyspace: "prebid" - memcache: - config_host: "" # Configuration endpoint for auto discovery. Replaced at docker build. - poll_interval_seconds: 30 # Node change polling interval when auto discovery is used - hosts: "10.0.0.1:11211" # List of nodes when not using auto discovery. Can also use an array for multiple hosts. - redis: - host: "127.0.0.1" - port: 6379 - password: "" - db: 1 - expiration: 10 # in Minutes - tls: - enabled: false - insecure_skip_verify: false + type: "memory" # Switch to be "aerospike", "cassandra", "memcache", "ignite" or "redis" for production. +# aerospike: +# hosts: [ "aerospike.prebid.com" ] +# port: 3000 +# namespace: "anynamespace" +# cassandra: +# hosts: "127.0.0.1" +# keyspace: "prebid" +# memcache: +# config_host: "" # Configuration endpoint for auto discovery. Replaced at docker build. +# poll_interval_seconds: 30 # Node change polling interval when auto discovery is used +# hosts: "10.0.0.1:11211" # List of nodes when not using auto discovery. Can also use an array for multiple hosts. 
+# redis: +# host: "127.0.0.1" +# port: 6379 +# password: "" +# db: 1 +# expiration: 10 # in Minutes +# tls: +# enabled: false +# insecure_skip_verify: false +# ignite: +# scheme: "http" +# host: "127.0.0.1" +# port: 8080 +# secure: false +# headers: !!omap +# - Content-Length: 0 +# cache: +# name: "cacheName" +# create_on_start: false compression: type: "snappy" # Can also be "none" metrics: - type: "none" # Can also be "influx" + type: "none" # Can also be "influx" influx: host: "http://influx.prebid.com" database: "some-database" diff --git a/config/backends.go b/config/backends.go index baa98f76..2f5c8694 100644 --- a/config/backends.go +++ b/config/backends.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" log "github.com/sirupsen/logrus" @@ -12,6 +13,7 @@ type Backend struct { Cassandra Cassandra `mapstructure:"cassandra"` Memcache Memcache `mapstructure:"memcache"` Redis Redis `mapstructure:"redis"` + Ignite Ignite `mapstructure:"ignite"` } func (cfg *Backend) validateAndLog() error { @@ -26,10 +28,12 @@ func (cfg *Backend) validateAndLog() error { return cfg.Memcache.validateAndLog() case BackendRedis: return cfg.Redis.validateAndLog() + case BackendIgnite: + return cfg.Ignite.validateAndLog() case BackendMemory: return nil default: - return fmt.Errorf(`invalid config.backend.type: %s. It must be "aerospike", "cassandra", "memcache", "redis", or "memory".`, cfg.Type) + return fmt.Errorf(`invalid config.backend.type: %s. 
It must be "aerospike", "cassandra", "memcache", "redis", "ignite", or "memory".`, cfg.Type) } return nil } @@ -42,16 +46,27 @@ const ( BackendMemcache BackendType = "memcache" BackendMemory BackendType = "memory" BackendRedis BackendType = "redis" + BackendIgnite BackendType = "ignite" ) type Aerospike struct { - DefaultTTL int `mapstructure:"default_ttl_seconds"` - Host string `mapstructure:"host"` - Hosts []string `mapstructure:"hosts"` - Port int `mapstructure:"port"` - Namespace string `mapstructure:"namespace"` - User string `mapstructure:"user"` - Password string `mapstructure:"password"` + DefaultTTLSecs int `mapstructure:"default_ttl_seconds"` + Host string `mapstructure:"host"` + Hosts []string `mapstructure:"hosts"` + Port int `mapstructure:"port"` + Namespace string `mapstructure:"namespace"` + User string `mapstructure:"user"` + Password string `mapstructure:"password"` + MaxReadRetries int `mapstructure:"max_read_retries"` + MaxWriteRetries int `mapstructure:"max_write_retries"` + // Please set this to a value lower than the `proto-fd-idle-ms` (converted + // to seconds) value set in your Aerospike Server. This is to avoid having + // race conditions where the server closes the connection but the client still + // tries to use it. If set to a value less than or equal to 0, Aerospike + // Client's default value will be used which is 55 seconds. + ConnIdleTimeoutSecs int `mapstructure:"connection_idle_timeout_seconds"` + // Specifies the size of the connection queue per node. + ConnQueueSize int `mapstructure:"connection_queue_size"` } func (cfg *Aerospike) validateAndLog() error { @@ -62,15 +77,41 @@ func (cfg *Aerospike) validateAndLog() error { if cfg.Port <= 0 { return fmt.Errorf("Cannot connect to Aerospike host at port %d", cfg.Port) } - if cfg.DefaultTTL > 0 { - log.Infof("config.backend.aerospike.default_ttl_seconds: %d. 
Note that this configuration option is being deprecated in favor of config.request_limits.max_ttl_seconds", cfg.DefaultTTL) - } + log.Infof("config.backend.aerospike.host: %s", cfg.Host) log.Infof("config.backend.aerospike.hosts: %v", cfg.Hosts) log.Infof("config.backend.aerospike.port: %d", cfg.Port) log.Infof("config.backend.aerospike.namespace: %s", cfg.Namespace) log.Infof("config.backend.aerospike.user: %s", cfg.User) + if cfg.DefaultTTLSecs > 0 { + log.Infof("config.backend.aerospike.default_ttl_seconds: %d. Note that this configuration option is being deprecated in favor of config.request_limits.max_ttl_seconds", cfg.DefaultTTLSecs) + } + + if cfg.ConnIdleTimeoutSecs > 0 { + log.Infof("config.backend.aerospike.connection_idle_timeout_seconds: %d.", cfg.ConnIdleTimeoutSecs) + } + + if cfg.MaxReadRetries < 2 { + log.Infof("config.backend.aerospike.max_read_retries value will default to 2") + cfg.MaxReadRetries = 2 + } else if cfg.MaxReadRetries > 2 { + log.Infof("config.backend.aerospike.max_read_retries: %d.", cfg.MaxReadRetries) + } + + if cfg.MaxWriteRetries < 0 { + log.Infof("config.backend.aerospike.max_write_retries value cannot be negative and will default to 0") + cfg.MaxWriteRetries = 0 + } else if cfg.MaxWriteRetries > 0 { + log.Infof("config.backend.aerospike.max_write_retries: %d.", cfg.MaxWriteRetries) + } + + if cfg.ConnQueueSize > 0 { + log.Infof("config.backend.aerospike.connection_queue_size: %d", cfg.ConnQueueSize) + } else { + log.Infof("config.backend.aerospike.connection_queue_size value will default to 256") + } + return nil } @@ -134,3 +175,39 @@ func (cfg *Redis) validateAndLog() error { log.Infof("config.backend.redis.tls.insecure_skip_verify: %t", cfg.TLS.InsecureSkipVerify) return nil } + +type Ignite struct { + Scheme string `mapstructure:"scheme"` + Host string `mapstructure:"host"` + Port int `mapstructure:"port"` + // If VerifyCert is set to true, Prebid Cache verifies the SSL certificate on the Ignite server + VerifyCert bool 
`mapstructure:"secure"` + Headers map[string]string `mapstructure:"headers"` + Cache IgniteCache `mapstructure:"cache"` +} + +type IgniteCache struct { + Name string `mapstructure:"name"` + CreateOnStart bool `mapstructure:"create_on_start"` +} + +func (cfg *Ignite) validateAndLog() error { + if len(cfg.Scheme) == 0 { + return errors.New("Cannot connect to Ignite: empty config.ignite.scheme") + } + if len(cfg.Host) == 0 { + return errors.New("Cannot connect to Ignite: empty config.ignite.host") + } + if len(cfg.Cache.Name) == 0 { + return errors.New("Cannot write nor read from Ignite: empty config.ignite.cachename") + } + log.Infof("config.backend.ignite.scheme: %s", cfg.Scheme) + log.Infof("config.backend.ignite.host: %s", cfg.Host) + log.Infof("config.backend.ignite.port: %d", cfg.Port) + log.Infof("config.backend.ignite.secure: %t", cfg.VerifyCert) + log.Infof("config.backend.ignite.headers: %v", cfg.Headers) + log.Infof("config.backend.ignite.cache.name: %s", cfg.Cache.Name) + log.Infof("config.backend.ignite.cache.create_on_start: %t", cfg.Cache.CreateOnStart) + + return nil +} diff --git a/config/backends_test.go b/config/backends_test.go index 5031d9a4..2b50e408 100644 --- a/config/backends_test.go +++ b/config/backends_test.go @@ -4,66 +4,317 @@ import ( "fmt" "testing" + "github.com/sirupsen/logrus" + testLogrus "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" ) func TestAerospikeValidateAndLog(t *testing.T) { - testCases := []struct { + + type logComponents struct { + msg string + lvl logrus.Level + } + + type testCase struct { desc string inCfg Aerospike hasError bool expectedError error + logEntries []logComponents + } + testGroups := []struct { + desc string + testCases []testCase }{ { - desc: "aerospike.hosts passed in", - inCfg: Aerospike{ - Hosts: []string{"foo.com", "bat.com"}, - Port: 8888, - }, - hasError: false, - }, - { - desc: "aerospike.host passed in", - inCfg: Aerospike{ - Host: 
"foo.com", - Port: 8888, - }, - hasError: false, - }, - { - desc: "aerospike.host aerospike.hosts passed in", - inCfg: Aerospike{ - Host: "foo.com", - Hosts: []string{"foo.com", "bat.com"}, - Port: 8888, + desc: "No errors expected", + testCases: []testCase{ + { + desc: "aerospike.host passed in", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.host passed in", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + Namespace: "prebid", + User: "prebid-user", + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: prebid", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: prebid-user", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.hosts passed in", + inCfg: Aerospike{ + Hosts: []string{"foo.com", "bat.com"}, + Port: 8888, + }, + hasError: false, + logEntries: []logComponents{ + {msg: 
"config.backend.aerospike.host: ", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{"foo.com", "bat.com"}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "both aerospike.host aerospike.hosts passed in", + inCfg: Aerospike{ + Host: "foo.com", + Hosts: []string{"foo.com", "bat.com"}, + Port: 8888, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{"foo.com", "bat.com"}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "both aerospike.host, aerospike.hosts and aerospike.default_ttl_seconds set", + inCfg: Aerospike{ + Host: "foo.com", + Hosts: []string{"foo.com", "bat.com"}, + Port: 8888, + DefaultTTLSecs: 3600, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{"foo.com", "bat.com"}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: 
", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.default_ttl_seconds: 3600. Note that this configuration option is being deprecated in favor of config.request_limits.max_ttl_seconds", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "both aerospike.host, aerospike.port and an aerospike.max_read_retries invalid value. Default to 2 retries", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + MaxReadRetries: 1, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.max_read_retries valid value.", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + MaxReadRetries: 3, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries: 3.", lvl: logrus.InfoLevel}, + 
{msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.max_write_retries invalid value. Default to 0 retries", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + MaxReadRetries: 2, + MaxWriteRetries: -1, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_write_retries value cannot be negative and will default to 0", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.max_read_retries valid value.", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + MaxReadRetries: 2, + MaxWriteRetries: 1, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_write_retries: 1.", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "aerospike.connection_idle_timeout_seconds value found in config", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + ConnIdleTimeoutSecs: 1, + }, + hasError: false, + logEntries: []logComponents{ + {msg: 
"config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_idle_timeout_seconds: 1.", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "config.backend.aerospike.connection_queue_size invalid value found in config", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + ConnQueueSize: -1, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size value will default to 256", lvl: logrus.InfoLevel}, + }, + }, + { + desc: "config.backend.aerospike.connection_queue_size valid value found in config", + inCfg: Aerospike{ + Host: "foo.com", + Port: 8888, + ConnQueueSize: 64, + }, + hasError: false, + logEntries: []logComponents{ + {msg: "config.backend.aerospike.host: foo.com", lvl: logrus.InfoLevel}, + {msg: fmt.Sprintf("config.backend.aerospike.hosts: %v", []string{}), lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.port: 8888", lvl: logrus.InfoLevel}, + {msg: 
"config.backend.aerospike.namespace: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.user: ", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.max_read_retries value will default to 2", lvl: logrus.InfoLevel}, + {msg: "config.backend.aerospike.connection_queue_size: 64", lvl: logrus.InfoLevel}, + }, + }, }, - hasError: false, }, { - desc: "aerospike.host and aerospike.hosts missing", - inCfg: Aerospike{ - Port: 8888, + desc: "Expect error", + testCases: []testCase{ + { + desc: "aerospike.host and aerospike.hosts missing", + inCfg: Aerospike{ + Port: 8888, + }, + hasError: true, + expectedError: fmt.Errorf("Cannot connect to empty Aerospike host(s)"), + }, + { + desc: "aerospike.port config missing", + inCfg: Aerospike{ + Host: "foo.com", + }, + hasError: true, + expectedError: fmt.Errorf("Cannot connect to Aerospike host at port 0"), + }, + { + desc: "aerospike.port config missing", + inCfg: Aerospike{ + Host: "foo.com", + Hosts: []string{"bar.com"}, + }, + hasError: true, + expectedError: fmt.Errorf("Cannot connect to Aerospike host at port 0"), + }, }, - hasError: true, - expectedError: fmt.Errorf("Cannot connect to empty Aerospike host(s)"), - }, - { - desc: "aerospike.port config missing", - inCfg: Aerospike{ - Host: "foo.com", - }, - hasError: true, - expectedError: fmt.Errorf("Cannot connect to Aerospike host at port 0"), }, } - for _, test := range testCases { + // logrus entries will be recorded to this `hook` object so we can compare and assert them + hook := testLogrus.NewGlobal() + + //substitute logger exit function so execution doesn't get interrupted when log.Fatalf() call comes + defer func() { logrus.StandardLogger().ExitFunc = nil }() + var fatal bool + logrus.StandardLogger().ExitFunc = func(int) { fatal = true } + + for _, group := range testGroups { + for _, test := range group.testCases { + fatal = false + + //run test + if test.hasError { + assert.Equal(t, test.inCfg.validateAndLog(), test.expectedError, group.desc+" : 
"+test.desc) + } else { + assert.Nil(t, test.inCfg.validateAndLog(), group.desc+" : "+test.desc) + } + + assert.False(t, fatal, group.desc+" : "+test.desc) + + if assert.Len(t, hook.Entries, len(test.logEntries), "Incorrect number of entries were logged to logrus in test %s", group.desc+" : "+test.desc) { + for i := 0; i < len(test.logEntries); i++ { + assert.Equal(t, test.logEntries[i].msg, hook.Entries[i].Message, group.desc+" : "+test.desc) + assert.Equal(t, test.logEntries[i].lvl, hook.Entries[i].Level, group.desc+" : "+test.desc) + } + } - //run test - if test.hasError { - assert.Equal(t, test.inCfg.validateAndLog(), test.expectedError, test.desc) - } else { - assert.Nil(t, test.inCfg.validateAndLog(), test.desc) + //Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry(), group.desc+" : "+test.desc) } } } diff --git a/config/config.go b/config/config.go index 30daa17f..6e714a7e 100644 --- a/config/config.go +++ b/config/config.go @@ -1,6 +1,8 @@ package config import ( + "net/http" + "strconv" "strings" "time" @@ -44,6 +46,7 @@ func setConfigDefaults(v *viper.Viper) { v.SetDefault("port", 2424) v.SetDefault("admin_port", 2525) v.SetDefault("index_response", "This application stores short-term data for use in Prebid.") + v.SetDefault("status_response", "") v.SetDefault("log.level", "info") v.SetDefault("backend.type", "memory") v.SetDefault("backend.aerospike.host", "") @@ -53,6 +56,10 @@ func setConfigDefaults(v *viper.Viper) { v.SetDefault("backend.aerospike.user", "") v.SetDefault("backend.aerospike.password", "") v.SetDefault("backend.aerospike.default_ttl_seconds", 0) + v.SetDefault("backend.aerospike.max_read_retries", 2) + v.SetDefault("backend.aerospike.max_write_retries", 0) + v.SetDefault("backend.aerospike.connection_idle_timeout_seconds", 0) + v.SetDefault("backend.aerospike.connection_queue_size", 0) v.SetDefault("backend.cassandra.hosts", "") v.SetDefault("backend.cassandra.keyspace", "") 
v.SetDefault("backend.cassandra.default_ttl_seconds", utils.CASSANDRA_DEFAULT_TTL_SECONDS) @@ -64,6 +71,13 @@ func setConfigDefaults(v *viper.Viper) { v.SetDefault("backend.redis.expiration", utils.REDIS_DEFAULT_EXPIRATION_MINUTES) v.SetDefault("backend.redis.tls.enabled", false) v.SetDefault("backend.redis.tls.insecure_skip_verify", false) + v.SetDefault("backend.ignite.scheme", "") + v.SetDefault("backend.ignite.host", "") + v.SetDefault("backend.ignite.port", 0) + v.SetDefault("backend.ignite.secure", false) + v.SetDefault("backend.ignite.headers", map[string]string{}) + v.SetDefault("backend.ignite.cache.name", "") + v.SetDefault("backend.ignite.cache.create_on_start", false) v.SetDefault("compression.type", "snappy") v.SetDefault("metrics.influx.enabled", false) v.SetDefault("metrics.influx.host", "") @@ -83,6 +97,8 @@ func setConfigDefaults(v *viper.Viper) { v.SetDefault("request_limits.max_size_bytes", utils.REQUEST_MAX_SIZE_BYTES) v.SetDefault("request_limits.max_num_values", utils.REQUEST_MAX_NUM_VALUES) v.SetDefault("request_limits.max_ttl_seconds", utils.REQUEST_MAX_TTL_SECONDS) + v.SetDefault("request_limits.max_header_size_bytes", http.DefaultMaxHeaderBytes) + v.SetDefault("request_logging.referer_sampling_rate", 0.0) v.SetDefault("routes.allow_public_write", true) } @@ -100,16 +116,19 @@ func setEnvVarsLookup(v *viper.Viper) { } type Configuration struct { - Port int `mapstructure:"port"` - AdminPort int `mapstructure:"admin_port"` - IndexResponse string `mapstructure:"index_response"` - Log Log `mapstructure:"log"` - RateLimiting RateLimiting `mapstructure:"rate_limiter"` - RequestLimits RequestLimits `mapstructure:"request_limits"` - Backend Backend `mapstructure:"backend"` - Compression Compression `mapstructure:"compression"` - Metrics Metrics `mapstructure:"metrics"` - Routes Routes `mapstructure:"routes"` + Port int `mapstructure:"port"` + AdminPort int `mapstructure:"admin_port"` + IndexResponse string `mapstructure:"index_response"` + Log Log 
`mapstructure:"log"` + RateLimiting RateLimiting `mapstructure:"rate_limiter"` + RequestLimits RequestLimits `mapstructure:"request_limits"` + RequestLogging RequestLogging `mapstructure:"request_logging"` + + StatusResponse string `mapstructure:"status_response"` + Backend Backend `mapstructure:"backend"` + Compression Compression `mapstructure:"compression"` + Metrics Metrics `mapstructure:"metrics"` + Routes Routes `mapstructure:"routes"` } // ValidateAndLog validates the config, terminating the program on any errors. @@ -121,6 +140,7 @@ func (cfg *Configuration) ValidateAndLog() { cfg.Log.validateAndLog() cfg.RateLimiting.validateAndLog() cfg.RequestLimits.validateAndLog() + cfg.RequestLogging.validateAndLog() if err := cfg.Backend.validateAndLog(); err != nil { log.Fatalf("%s", err.Error()) @@ -161,11 +181,27 @@ func (cfg *RateLimiting) validateAndLog() { log.Infof("config.rate_limiter.num_requests: %d", cfg.MaxRequestsPerSecond) } +type RequestLogging struct { + // RefererSamplingRate represents the probability of Prebid Cache loging the incoming request referer header + // chance = 1.0 => always log, + // chance = 0.0 => never log + RefererSamplingRate float64 `mapstructure:"referer_sampling_rate"` +} + +func (cfg *RequestLogging) validateAndLog() { + if cfg.RefererSamplingRate >= 0.0 && cfg.RefererSamplingRate <= 1.0 { + log.Infof("config.request_logging.referer_sampling_rate: %s", strconv.FormatFloat(cfg.RefererSamplingRate, 'f', -1, 64)) + } else { + log.Fatalf("invalid config.request_logging.referer_sampling_rate: value must be positive and not greater than 1.0. 
Got %s", strconv.FormatFloat(cfg.RefererSamplingRate, 'f', -1, 64)) + } +} + type RequestLimits struct { MaxSize int `mapstructure:"max_size_bytes"` MaxNumValues int `mapstructure:"max_num_values"` MaxTTLSeconds int `mapstructure:"max_ttl_seconds"` AllowSettingKeys bool `mapstructure:"allow_setting_keys"` + MaxHeaderSize int `mapstructure:"max_header_size_bytes"` } func (cfg *RequestLimits) validateAndLog() { @@ -188,6 +224,12 @@ func (cfg *RequestLimits) validateAndLog() { } else { log.Fatalf("invalid config.request_limits.max_num_values: %d. Value cannot be negative.", cfg.MaxNumValues) } + + if cfg.MaxHeaderSize >= 0 { + log.Infof("config.request_limits.max_header_size_bytes: %d", cfg.MaxHeaderSize) + } else { + log.Fatalf("invalid config.request_limits.max_header_size_bytes: %d. Value cannot be negative.", cfg.MaxHeaderSize) + } } type Compression struct { @@ -293,19 +335,12 @@ type PrometheusMetrics struct { Enabled bool `mapstructure:"enabled"` } +// validateAndLog will error out when the value of port is 0 func (promMetricsConfig *PrometheusMetrics) validateAndLog() { - // validate if promMetricsConfig.Port == 0 { log.Fatalf(`Despite being enabled, prometheus metrics came with an empty port number: config.metrics.prometheus.port = 0`) } - if promMetricsConfig.Namespace == "" { - log.Fatalf(`Despite being enabled, prometheus metrics came with an empty name space: config.metrics.prometheus.namespace = %s.`, promMetricsConfig.Namespace) - } - if promMetricsConfig.Subsystem == "" { - log.Fatalf(`Despite being enabled, prometheus metrics came with an empty subsystem value: config.metrics.prometheus.subsystem = \"\".`) - } - // log log.Infof("config.metrics.prometheus.namespace: %s", promMetricsConfig.Namespace) log.Infof("config.metrics.prometheus.subsystem: %s", promMetricsConfig.Subsystem) log.Infof("config.metrics.prometheus.port: %d", promMetricsConfig.Port) diff --git a/config/config_test.go b/config/config_test.go index 536f33c2..7e1a5477 100644 --- 
a/config/config_test.go +++ b/config/config_test.go @@ -1,7 +1,6 @@ package config import ( - "fmt" "os" "path/filepath" "strings" @@ -13,6 +12,7 @@ import ( testLogrus "github.com/sirupsen/logrus/hooks/test" "github.com/spf13/viper" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDefaults(t *testing.T) { @@ -645,13 +645,12 @@ func TestPrometheusValidateAndLog(t *testing.T) { } testCases := []aTest{ { - description: "[1] Port invalid, Namespace valid, Subsystem valid. Expect error", + description: "Port invalid, both Namespace and Subsystem were set. Expect error", prometheusConfig: &PrometheusMetrics{ Port: 0, Namespace: "prebid", Subsystem: "cache", }, - //out expectError: true, expectedLogInfo: []logComponents{ { @@ -673,19 +672,14 @@ func TestPrometheusValidateAndLog(t *testing.T) { }, }, { - description: "[2] Port valid, Namespace invalid, Subsystem valid. Expect error", + description: "Port valid, Namespace empty, Subsystem set. Don't expect error", prometheusConfig: &PrometheusMetrics{ Port: 8080, Namespace: "", Subsystem: "cache", }, - //out - expectError: true, + expectError: false, expectedLogInfo: []logComponents{ - { - msg: `Despite being enabled, prometheus metrics came with an empty name space: config.metrics.prometheus.namespace = .`, - lvl: logrus.FatalLevel, - }, { msg: "config.metrics.prometheus.namespace: ", lvl: logrus.InfoLevel, @@ -701,19 +695,14 @@ func TestPrometheusValidateAndLog(t *testing.T) { }, }, { - description: "[3] Port valid, Namespace valid, Subsystem invalid. Expect error", + description: "Port valid, Namespace set, Subsystem empty. 
Don't expect error", prometheusConfig: &PrometheusMetrics{ Port: 8080, Namespace: "prebid", Subsystem: "", }, - //out - expectError: true, + expectError: false, expectedLogInfo: []logComponents{ - { - msg: `Despite being enabled, prometheus metrics came with an empty subsystem value: config.metrics.prometheus.subsystem = \"\".`, - lvl: logrus.FatalLevel, - }, { msg: "config.metrics.prometheus.namespace: prebid", lvl: logrus.InfoLevel, @@ -729,13 +718,12 @@ func TestPrometheusValidateAndLog(t *testing.T) { }, }, { - description: "[4] Port valid, Namespace valid, Subsystem valid. Expect elements in log", + description: "Port valid, both Namespace and Subsystem set. Expect elements in log", prometheusConfig: &PrometheusMetrics{ Port: 8080, Namespace: "prebid", Subsystem: "cache", }, - //out expectError: false, expectedLogInfo: []logComponents{ { @@ -752,6 +740,29 @@ func TestPrometheusValidateAndLog(t *testing.T) { }, }, }, + { + description: "Port valid, Namespace and Subsystem empty. Expect log messages with blank Namespace and Subsystem", + prometheusConfig: &PrometheusMetrics{ + Port: 8080, + Namespace: "", + Subsystem: "", + }, + expectError: false, + expectedLogInfo: []logComponents{ + { + msg: "config.metrics.prometheus.namespace: ", + lvl: logrus.InfoLevel, + }, + { + msg: "config.metrics.prometheus.subsystem: ", + lvl: logrus.InfoLevel, + }, + { + msg: "config.metrics.prometheus.port: 8080", + lvl: logrus.InfoLevel, + }, + }, + }, } // logrus entries will be recorded to this `hook` object so we can compare and assert them @@ -762,7 +773,7 @@ func TestPrometheusValidateAndLog(t *testing.T) { var fatal bool logrus.StandardLogger().ExitFunc = func(int) { fatal = true } - for j, tc := range testCases { + for _, tc := range testCases { // Reset the fatal flag to false every test fatal = false @@ -770,10 +781,10 @@ func TestPrometheusValidateAndLog(t *testing.T) { tc.prometheusConfig.validateAndLog() // Assert logrus expected entries - if assert.Equal(t, 
len(tc.expectedLogInfo), len(hook.Entries), "Incorrect number of entries were logged to logrus in test %d: len(tc.expectedLogInfo) = %d len(hook.Entries) = %d", j, len(tc.expectedLogInfo), len(hook.Entries)) { + if assert.Equal(t, len(tc.expectedLogInfo), len(hook.Entries), "Incorrect number of entries were logged to logrus in test %s.", tc.description) { for i := 0; i < len(tc.expectedLogInfo); i++ { assert.Equal(t, tc.expectedLogInfo[i].msg, hook.Entries[i].Message) - assert.Equal(t, tc.expectedLogInfo[i].lvl, hook.Entries[i].Level, "Expected Info entry in log") + assert.Equal(t, tc.expectedLogInfo[i].lvl, hook.Entries[i].Level, "Expected Info entry in log. Test %s.", tc.description) } } else { return @@ -856,6 +867,18 @@ func TestRequestLimitsValidateAndLog(t *testing.T) { }, expectFatal: true, }, + { + description: "Negative max_header_size_bytes, expect fatal level log and early exit", + inRequestLimitsCfg: &RequestLimits{MaxHeaderSize: -1}, + expectedLogInfo: []logComponents{ + {msg: `config.request_limits.allow_setting_keys: false`, lvl: logrus.InfoLevel}, + {msg: `config.request_limits.max_ttl_seconds: 0`, lvl: logrus.InfoLevel}, + {msg: `config.request_limits.max_size_bytes: 0`, lvl: logrus.InfoLevel}, + {msg: `config.request_limits.max_num_values: 0`, lvl: logrus.InfoLevel}, + {msg: `invalid config.request_limits.max_header_size_bytes: -1. Value cannot be negative.`, lvl: logrus.FatalLevel}, + }, + expectFatal: true, + }, } //substitute logger exit function so execution doesn't get interrupted @@ -892,6 +915,89 @@ func TestRequestLimitsValidateAndLog(t *testing.T) { } } +func TestRequestLogging(t *testing.T) { + hook := testLogrus.NewGlobal() + + type logComponents struct { + msg string + lvl logrus.Level + } + + testCases := []struct { + name string + inRequestLoggingCfg *RequestLogging + expectedLogInfo []logComponents + }{ + { + name: "invalid_negative", // must be greater or equal to zero. 
Expect fatal log + inRequestLoggingCfg: &RequestLogging{ + RefererSamplingRate: -0.1, + }, + expectedLogInfo: []logComponents{ + {msg: `invalid config.request_logging.referer_sampling_rate: value must be positive and not greater than 1.0. Got -0.1`, lvl: logrus.FatalLevel}, + }, + }, + { + name: "invalid_high", // must be less than or equal to 1. expect fatal log. + inRequestLoggingCfg: &RequestLogging{ + RefererSamplingRate: 1.1, + }, + expectedLogInfo: []logComponents{ + {msg: `invalid config.request_logging.referer_sampling_rate: value must be positive and not greater than 1.0. Got 1.1`, lvl: logrus.FatalLevel}, + }, + }, + { + name: "valid_one", // sampling rate of 1.0 is between the acceptable threshold. Expect info log" + inRequestLoggingCfg: &RequestLogging{ + RefererSamplingRate: 1.0, + }, + expectedLogInfo: []logComponents{ + {msg: `config.request_logging.referer_sampling_rate: 1`, lvl: logrus.InfoLevel}, + }, + }, + { + name: "valid_zero", // sampling rate of 0.0 is between the acceptable threshold. Expect info log. 
+ inRequestLoggingCfg: &RequestLogging{ + RefererSamplingRate: 0.0, + }, + expectedLogInfo: []logComponents{ + {msg: `config.request_logging.referer_sampling_rate: 0`, lvl: logrus.InfoLevel}, + }, + }, + { + name: "valid", + inRequestLoggingCfg: &RequestLogging{ + RefererSamplingRate: 0.1111, + }, + expectedLogInfo: []logComponents{ + {msg: `config.request_logging.referer_sampling_rate: 0.1111`, lvl: logrus.InfoLevel}, + }, + }, + } + + //substitute logger exit function so execution doesn't get interrupted + defer func() { logrus.StandardLogger().ExitFunc = nil }() + logrus.StandardLogger().ExitFunc = func(int) {} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.inRequestLoggingCfg.validateAndLog() + + // assertions + require.Len(t, hook.Entries, len(tc.expectedLogInfo), tc.name+":log_entries") + for i := 0; i < len(tc.expectedLogInfo); i++ { + assert.Equal(t, tc.expectedLogInfo[i].msg, hook.Entries[i].Message, tc.name+":message") + assert.Equal(t, tc.expectedLogInfo[i].lvl, hook.Entries[i].Level, tc.name+":log_level") + } + + //Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry()) + + }) + } +} + func TestCompressionValidateAndLog(t *testing.T) { // logrus entries will be recorded to this `hook` object so we can compare and assert them @@ -1058,18 +1164,20 @@ func TestConfigurationValidateAndLog(t *testing.T) { expectedConfig := getExpectedDefaultConfig() expectedLogInfo := []logComponents{ - {msg: fmt.Sprintf("config.port: %d", expectedConfig.Port), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.admin_port: %d", expectedConfig.AdminPort), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.log.level: %s", expectedConfig.Log.Level), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.rate_limiter.enabled: %t", expectedConfig.RateLimiting.Enabled), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.rate_limiter.num_requests: %d", expectedConfig.RateLimiting.MaxRequestsPerSecond), 
lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.request_limits.allow_setting_keys: %v", expectedConfig.RequestLimits.AllowSettingKeys), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.request_limits.max_ttl_seconds: %d", expectedConfig.RequestLimits.MaxTTLSeconds), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.request_limits.max_size_bytes: %d", expectedConfig.RequestLimits.MaxSize), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.request_limits.max_num_values: %d", expectedConfig.RequestLimits.MaxNumValues), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.backend.type: %s", expectedConfig.Backend.Type), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("config.compression.type: %s", expectedConfig.Compression.Type), lvl: logrus.InfoLevel}, - {msg: fmt.Sprintf("Prebid Cache will run without metrics"), lvl: logrus.InfoLevel}, + {msg: "config.port: 2424", lvl: logrus.InfoLevel}, + {msg: "config.admin_port: 2525", lvl: logrus.InfoLevel}, + {msg: "config.log.level: info", lvl: logrus.InfoLevel}, + {msg: "config.rate_limiter.enabled: true", lvl: logrus.InfoLevel}, + {msg: "config.rate_limiter.num_requests: 100", lvl: logrus.InfoLevel}, + {msg: "config.request_limits.allow_setting_keys: false", lvl: logrus.InfoLevel}, + {msg: "config.request_limits.max_ttl_seconds: 3600", lvl: logrus.InfoLevel}, + {msg: "config.request_limits.max_size_bytes: 10240", lvl: logrus.InfoLevel}, + {msg: "config.request_limits.max_num_values: 10", lvl: logrus.InfoLevel}, + {msg: "config.request_limits.max_header_size_bytes: 1048576", lvl: logrus.InfoLevel}, + {msg: "config.request_logging.referer_sampling_rate: 0", lvl: logrus.InfoLevel}, + {msg: "config.backend.type: memory", lvl: logrus.InfoLevel}, + {msg: "config.compression.type: snappy", lvl: logrus.InfoLevel}, + {msg: "Prebid Cache will run without metrics", lvl: logrus.InfoLevel}, } // Run test @@ -1078,7 +1186,7 @@ func TestConfigurationValidateAndLog(t *testing.T) { // Assertions if assert.Len(t, hook.Entries, 
len(expectedLogInfo)) { for i := 0; i < len(expectedLogInfo); i++ { - assert.True(t, strings.HasPrefix(hook.Entries[i].Message, expectedLogInfo[i].msg), "Wrong message") + assert.Equal(t, expectedLogInfo[i].msg, hook.Entries[i].Message, "Wrong message") assert.Equal(t, expectedLogInfo[i].lvl, hook.Entries[i].Level, "Wrong log level") } } @@ -1184,7 +1292,8 @@ func getExpectedDefaultConfig() Configuration { Hosts: []string{}, }, Aerospike: Aerospike{ - Hosts: []string{}, + Hosts: []string{}, + MaxReadRetries: 2, }, Cassandra: Cassandra{ DefaultTTL: utils.CASSANDRA_DEFAULT_TTL_SECONDS, @@ -1192,6 +1301,9 @@ func getExpectedDefaultConfig() Configuration { Redis: Redis{ ExpirationMinutes: utils.REDIS_DEFAULT_EXPIRATION_MINUTES, }, + Ignite: Ignite{ + Headers: map[string]string{}, + }, }, Compression: Compression{ Type: CompressionType("snappy"), @@ -1200,10 +1312,14 @@ func getExpectedDefaultConfig() Configuration { Enabled: true, MaxRequestsPerSecond: 100, }, + RequestLogging: RequestLogging{ + RefererSamplingRate: 0.00, + }, RequestLimits: RequestLimits{ MaxSize: 10240, MaxNumValues: 10, MaxTTLSeconds: 3600, + MaxHeaderSize: 1048576, }, Routes: Routes{ AllowPublicWrite: true, @@ -1229,17 +1345,20 @@ func getExpectedFullConfigForTestFile() Configuration { MaxNumValues: 10, MaxTTLSeconds: 5000, AllowSettingKeys: true, + MaxHeaderSize: 16384, //16KiB }, Backend: Backend{ Type: BackendMemory, Aerospike: Aerospike{ - DefaultTTL: 3600, - Host: "aerospike.prebid.com", - Hosts: []string{"aerospike2.prebid.com", "aerospike3.prebid.com"}, - Port: 3000, - Namespace: "whatever", - User: "foo", - Password: "bar", + DefaultTTLSecs: 3600, + Host: "aerospike.prebid.com", + Hosts: []string{"aerospike2.prebid.com", "aerospike3.prebid.com"}, + Port: 3000, + Namespace: "whatever", + User: "foo", + Password: "bar", + MaxReadRetries: 2, + ConnIdleTimeoutSecs: 2, }, Cassandra: Cassandra{ Hosts: "127.0.0.1", @@ -1260,6 +1379,18 @@ func getExpectedFullConfigForTestFile() Configuration { 
InsecureSkipVerify: false, }, }, + Ignite: Ignite{ + Scheme: "http", + Host: "127.0.0.1", + Port: 8080, + Headers: map[string]string{ + "Content-Length": "0", + }, + Cache: IgniteCache{ + Name: "whatever", + CreateOnStart: false, + }, + }, }, Compression: Compression{ Type: CompressionType("snappy"), diff --git a/config/configtest/sample_full_config.yaml b/config/configtest/sample_full_config.yaml index 0f1454ee..23854c9f 100644 --- a/config/configtest/sample_full_config.yaml +++ b/config/configtest/sample_full_config.yaml @@ -11,6 +11,7 @@ request_limits: max_num_values: 10 max_ttl_seconds: 5000 allow_setting_keys: true + max_header_size_bytes: 16384 backend: type: "memory" aerospike: @@ -21,6 +22,7 @@ backend: namespace: "whatever" user: "foo" password: "bar" + connection_idle_timeout_seconds: 2 cassandra: hosts: "127.0.0.1" keyspace: "prebid" @@ -36,6 +38,16 @@ backend: tls: enabled: false insecure_skip_verify: false + ignite: + scheme: "http" + host: "127.0.0.1" + port: 8080 + secure: false + headers: !!omap + - Content-Length: 0 + cache: + name: "whatever" + create_on_start: false compression: type: "snappy" metrics: diff --git a/endpoints/get.go b/endpoints/get.go index ebd00e54..cd225ca6 100644 --- a/endpoints/get.go +++ b/endpoints/get.go @@ -16,20 +16,28 @@ import ( // GetHandler serves "GET /cache" requests. 
type GetHandler struct { - backend backends.Backend - metrics *metrics.Metrics + backend backends.Backend + metrics *metrics.Metrics + cfg getHandlerConfig +} + +type getHandlerConfig struct { allowCustomKeys bool + refererLogRate float64 } // NewGetHandler returns the handle function for the "/cache" endpoint when it receives a GET request -func NewGetHandler(storage backends.Backend, metrics *metrics.Metrics, allowCustomKeys bool) func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { +func NewGetHandler(storage backends.Backend, metrics *metrics.Metrics, allowCustomKeys bool, refererSamplingRate float64) func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { getHandler := &GetHandler{ // Assign storage client to get endpoint backend: storage, // pass metrics engine metrics: metrics, - // Pass configuration value - allowCustomKeys: allowCustomKeys, + // Pass configuration values + cfg: getHandlerConfig{ + allowCustomKeys: allowCustomKeys, + refererLogRate: refererSamplingRate, + }, } // Return handle function @@ -38,9 +46,16 @@ func NewGetHandler(storage backends.Backend, metrics *metrics.Metrics, allowCust func (e *GetHandler) handle(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { e.metrics.RecordGetTotal() + + // If incoming request comes with a referer header, there's a e.cfg.refererLogRate percent chance + // getting it logged + if referer := r.Referer(); referer != "" && utils.RandomPick(e.cfg.refererLogRate) { + log.Info("GET request Referer header: " + referer) + } + start := time.Now() - uuid, parseErr := parseUUID(r, e.allowCustomKeys) + uuid, parseErr := parseUUID(r, e.cfg.allowCustomKeys) if parseErr != nil { // parseUUID either returns http.StatusBadRequest or http.StatusNotFound. 
Both should be // accounted using RecordGetBadRequest() diff --git a/endpoints/get_test.go b/endpoints/get_test.go index 021c5f08..53df79b7 100644 --- a/endpoints/get_test.go +++ b/endpoints/get_test.go @@ -1,18 +1,85 @@ package endpoints import ( + "bytes" "net/http" + "net/http/httptest" "testing" "github.com/julienschmidt/httprouter" - "github.com/prebid/prebid-cache/backends" - "github.com/prebid/prebid-cache/metrics" - "github.com/prebid/prebid-cache/metrics/metricstest" "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" + testLogrus "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" + + "github.com/prebid/prebid-cache/backends" + "github.com/prebid/prebid-cache/metrics" + "github.com/prebid/prebid-cache/metrics/metricstest" ) +func TestGetJsonTests(t *testing.T) { + hook := testLogrus.NewGlobal() + defer func() { logrus.StandardLogger().ExitFunc = nil }() + logrus.StandardLogger().ExitFunc = func(int) {} + + jsonTests := listJsonFiles("sample-requests/get-endpoint") + for _, testFile := range jsonTests { + var backend backends.Backend + mockMetrics := metricstest.CreateMockMetrics() + tc, backend, m, err := setupJsonTest(&mockMetrics, backend, testFile) + if !assert.NoError(t, err, "%s", testFile) { + hook.Reset() + continue + } + + router := httprouter.New() + router.GET("/cache", NewGetHandler(backend, m, tc.HostConfig.AllowSettingKeys, tc.HostConfig.RefererLogRate)) + request, err := http.NewRequest("GET", "/cache?"+tc.Request.Query, nil) + if !assert.NoError(t, err, "Failed to create a GET request: %v", err) { + hook.Reset() + assert.Nil(t, hook.LastEntry()) + continue + } + + if len(tc.Request.Headers) > 0 { + for header, values := range tc.Request.Headers { + for _, v := range values { + request.Header.Set(header, v) + } + } + } + + rr := httptest.NewRecorder() + + // Run test + router.ServeHTTP(rr, request) + + // Assertions + assert.Equal(t, tc.ExpectedOutput.Code, rr.Code, testFile) + + // Assert this is a 
valid test that expects either an error or a GetResponse + if !assert.False(t, len(tc.ExpectedOutput.ErrorMsg) > 0 && len(tc.ExpectedOutput.GetOutput) > 0, "%s must come with either an expected error message or an expected response", testFile) { + hook.Reset() + assert.Nil(t, hook.LastEntry()) + continue + } + + // If error is expected, assert error message with the response body + if len(tc.ExpectedOutput.ErrorMsg) > 0 { + assert.Equal(t, tc.ExpectedOutput.ErrorMsg, rr.Body.String(), testFile) + } else { + assert.Equal(t, tc.ExpectedOutput.GetOutput, rr.Body.String(), testFile) + } + + assertLogEntries(t, tc.ExpectedLogEntries, hook.Entries, testFile) + metricstest.AssertMetrics(t, tc.ExpectedMetrics, mockMetrics) + + // Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry()) + } +} + func TestGetInvalidUUIDs(t *testing.T) { backend := backends.NewMemoryBackend() router := httprouter.New() @@ -24,7 +91,7 @@ func TestGetInvalidUUIDs(t *testing.T) { }, } - router.GET("/cache", NewGetHandler(backend, m, false)) + router.GET("/cache", NewGetHandler(backend, m, false, 0.0)) getResults := doMockGet(t, router, "fdd9405b-ef2b-46da-a55a-2f526d338e16") if getResults.Code != http.StatusNotFound { @@ -40,13 +107,24 @@ func TestGetInvalidUUIDs(t *testing.T) { } func TestGetHandler(t *testing.T) { + preExistentDataInBackend := map[string]string{ + "non-36-char-key-maps-to-json": `json{"field":"value"}`, + "36-char-key-maps-to-non-xml-nor-json": `#@!*{"desc":"data got malformed and is not prefixed with 'xml' nor 'json' substring"}`, + "36-char-key-maps-to-actual-xml-value": "xmlxml data here", + } + type logEntry struct { msg string lvl logrus.Level } + type testConfig struct { + allowKeys bool + refererSamplingRate float64 + } type testInput struct { - uuid string - allowKeys bool + uuid string + cfg testConfig + reqHeaders map[string]string } type testOutput struct { responseCode int @@ -63,8 +141,7 @@ func TestGetHandler(t 
*testing.T) { { "Missing UUID. Return http error but don't interrupt server's execution", testInput{ - uuid: "", - allowKeys: false, + uuid: "", }, testOutput{ responseCode: http.StatusBadRequest, @@ -84,8 +161,7 @@ func TestGetHandler(t *testing.T) { { "Prebid Cache wasn't configured to allow custom keys therefore, it doesn't allow for keys different than 36 char long. Respond with http error and don't interrupt server's execution", testInput{ - uuid: "non-36-char-key-maps-to-json", - allowKeys: false, + uuid: "non-36-char-key-maps-to-json", }, testOutput{ responseCode: http.StatusNotFound, @@ -105,8 +181,8 @@ func TestGetHandler(t *testing.T) { { "Configuration that allows custom keys. These are not required to be 36 char long. Since the uuid maps to a value, return it along a 200 status code", testInput{ - uuid: "non-36-char-key-maps-to-json", - allowKeys: true, + uuid: "non-36-char-key-maps-to-json", + cfg: testConfig{allowKeys: true}, }, testOutput{ responseCode: http.StatusOK, @@ -167,6 +243,48 @@ func TestGetHandler(t *testing.T) { }, }, }, + { + "Sampling rate is set to 100% but request comes with no referer header. No logs expected.", + testInput{ + uuid: "36-char-key-maps-to-actual-xml-value", + cfg: testConfig{refererSamplingRate: 1.0}, + reqHeaders: map[string]string{"OtherHeader": "headervalue"}, + }, + testOutput{ + responseCode: http.StatusOK, + responseBody: "xml data here", + logEntries: []logEntry{}, + expectedMetrics: []string{ + "RecordGetTotal", + "RecordGetDuration", + }, + }, + }, + { + "Sampling rate is set to 100%. 
Expect request referer header to be logged.", + testInput{ + uuid: "36-char-key-maps-to-actual-xml-value", + cfg: testConfig{refererSamplingRate: 1.0}, + reqHeaders: map[string]string{ + "Referer": "anyreferer", + "OtherHeader": "headervalue", + }, + }, + testOutput{ + responseCode: http.StatusOK, + responseBody: "xml data here", + logEntries: []logEntry{ + { + msg: "GET request Referer header: anyreferer", + lvl: logrus.InfoLevel, + }, + }, + expectedMetrics: []string{ + "RecordGetTotal", + "RecordGetDuration", + }, + }, + }, } // Lower Log Treshold so we can see DebugLevel entries in our mock logrus log @@ -184,7 +302,11 @@ func TestGetHandler(t *testing.T) { fatal = false // Set up test object - backend := newMockBackend() + backend, err := backends.NewMemoryBackendWithValues(preExistentDataInBackend) + if !assert.NoError(t, err, "%s. Mock backend could not be created", test.desc) { + hook.Reset() + continue + } router := httprouter.New() mockMetrics := metricstest.CreateMockMetrics() m := &metrics.Metrics{ @@ -192,10 +314,25 @@ func TestGetHandler(t *testing.T) { &mockMetrics, }, } - router.GET("/cache", NewGetHandler(backend, m, test.in.allowKeys)) + router.GET("/cache", NewGetHandler(backend, m, test.in.cfg.allowKeys, test.in.cfg.refererSamplingRate)) // Run test - getResults := doMockGet(t, router, test.in.uuid) + getResults := httptest.NewRecorder() + + body := new(bytes.Buffer) + getReq, err := http.NewRequest("GET", "/cache"+"?uuid="+test.in.uuid, body) + + if len(test.in.reqHeaders) > 0 { + for k, v := range test.in.reqHeaders { + getReq.Header.Set(k, v) + } + } + + if !assert.NoError(t, err, "Failed to create a GET request: %v", err) { + hook.Reset() + continue + } + router.ServeHTTP(getResults, getReq) // Assert server response and status code assert.Equal(t, test.out.responseCode, getResults.Code, test.desc) diff --git a/endpoints/put.go b/endpoints/put.go index e4258109..702cdbe4 100644 --- a/endpoints/put.go +++ b/endpoints/put.go @@ -26,8 +26,9 @@ 
type PutHandler struct { } type putHandlerConfig struct { - maxNumValues int - allowKeys bool + maxNumValues int + allowKeys bool + refererLogRate float64 } type syncPools struct { @@ -36,7 +37,7 @@ type syncPools struct { } // NewPutHandler returns the handle function for the "/cache" endpoint when it receives a POST request -func NewPutHandler(storage backends.Backend, metrics *metrics.Metrics, maxNumValues int, allowKeys bool) func(http.ResponseWriter, *http.Request, httprouter.Params) { +func NewPutHandler(storage backends.Backend, metrics *metrics.Metrics, maxNumValues int, allowKeys bool, refererLogRate float64) func(http.ResponseWriter, *http.Request, httprouter.Params) { putHandler := &PutHandler{} // Assign storage client to put endpoint @@ -47,8 +48,9 @@ func NewPutHandler(storage backends.Backend, metrics *metrics.Metrics, maxNumVal // Pass configuration values putHandler.cfg = putHandlerConfig{ - maxNumValues: maxNumValues, - allowKeys: allowKeys, + maxNumValues: maxNumValues, + allowKeys: allowKeys, + refererLogRate: refererLogRate, } // Instantiate thread-safe memory pools @@ -107,6 +109,7 @@ func (e *PutHandler) parseRequest(r *http.Request) (*putRequest, error) { // - XML content gets unmarshaled in order to un-escape it and then gets // prepended by its type // - JSON content gets prepended by its type +// // No other formats are supported. func parsePutObject(p putObject) (string, error) { var toCache string @@ -123,15 +126,11 @@ func parsePutObject(p putObject) (string, error) { // Limit the type of data to XML or JSON if p.Type == utils.XML_PREFIX { - if p.Value[0] != byte('"') || p.Value[len(p.Value)-1] != byte('"') { - return "", utils.NewPBCError(utils.MALFORMED_XML, fmt.Sprintf("XML messages must have a String value. Found %v", p.Value)) - } - // Be careful about the cross-script escaping issues here. JSON requires quotation marks to be escaped, // for example... so we'll need to un-escape it before we consider it to be XML content. 
- var interpreted string - if err := json.Unmarshal(p.Value, &interpreted); err != nil { - return "", utils.NewPBCError(utils.MALFORMED_XML, fmt.Sprintf("Error unmarshalling XML value: %v", p.Value)) + interpreted, err := unescapeXML(p.Value) + if err != nil { + return "", err } toCache = p.Type + interpreted @@ -144,6 +143,20 @@ func parsePutObject(p putObject) (string, error) { return toCache, nil } +// unescapeXML unmarshalls the rawXML into a string in order to unescape characters +func unescapeXML(rawXML json.RawMessage) (string, error) { + if rawXML[0] != byte('"') || rawXML[len(rawXML)-1] != byte('"') { + return "", utils.NewPBCError(utils.MALFORMED_XML, fmt.Sprintf("XML messages must have a String value. Found %v", rawXML)) + } + + var interpreted string + if err := json.Unmarshal(rawXML, &interpreted); err != nil { + return "", utils.NewPBCError(utils.MALFORMED_XML, fmt.Sprintf("Error unmarshalling XML value: %v", rawXML)) + } + + return interpreted, nil +} + func classifyBackendError(err error, index int) error { if _, ok := err.(*backendDecorators.BadPayloadSize); ok { return utils.NewPBCError(utils.BAD_PAYLOAD_SIZE, fmt.Sprintf("POST /cache element %d exceeded max size: %v", index, err.Error())) @@ -173,6 +186,12 @@ func logBackendError(err error) { func (e *PutHandler) handle(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { e.metrics.RecordPutTotal() + // If incoming request comes with a referer header, there's a e.cfg.refererLogRate percent chance + // getting it logged + if referer := r.Referer(); referer != "" && utils.RandomPick(e.cfg.refererLogRate) { + logrus.Info("POST request Referer header: " + referer) + } + start := time.Now() bytes, err := e.processPutRequest(r) diff --git a/endpoints/put_test.go b/endpoints/put_test.go index 43d46356..428c052c 100644 --- a/endpoints/put_test.go +++ b/endpoints/put_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/go-redis/redis/v8" + "github.com/gofrs/uuid" 
"github.com/julienschmidt/httprouter" "github.com/prebid/prebid-cache/backends" backendConfig "github.com/prebid/prebid-cache/backends/config" @@ -33,174 +35,94 @@ import ( ) func TestPutJsonTests(t *testing.T) { - testGroups := []struct { - desc string - expectError bool - tests []string - }{ - { - desc: "Valid put requests. Expect 200 response", - expectError: false, - tests: []string{ - "sample-requests/put-endpoint/valid-whole/single-element-to-store.json", - "sample-requests/put-endpoint/valid-whole/no-elements-to-store.json", - "sample-requests/put-endpoint/valid-whole/multiple-elements-to-store.json", - "sample-requests/put-endpoint/valid-whole/valid-type-json.json", - "sample-requests/put-endpoint/valid-whole/valid-type-xml.json", - "sample-requests/put-endpoint/valid-whole/ttl-more-than-max.json", - "sample-requests/put-endpoint/valid-whole/ttl-missing.json", - }, - }, - { - desc: "Request tries to store more elements than the max allowed. Return error", - expectError: true, - tests: []string{ - "sample-requests/put-endpoint/invalid-number-of-elements/puts-max-num-values.json", - }, - }, - { - desc: "Invalid 'type' field values, expect error", - expectError: true, - tests: []string{ - "sample-requests/put-endpoint/invalid-types/type-missing.json", - "sample-requests/put-endpoint/invalid-types/type-unknown.json", - }, - }, - { - desc: "invalid 'value' field values, expect error", - expectError: true, - tests: []string{ - "sample-requests/put-endpoint/invalid-value/value-missing.json", - "sample-requests/put-endpoint/invalid-value/value-greater-than-max.json", - }, - }, - { - desc: "Valid when storing under custom keys is allowed, expect 200 responses", - expectError: false, - tests: []string{ - "sample-requests/put-endpoint/custom-keys/allowed/key-field-included.json", - "sample-requests/put-endpoint/custom-keys/allowed/key-field-missing.json", - }, - }, - { - desc: "Valid when storing under custom keys is not allowed, expect 200 responses", - expectError: 
false, - tests: []string{ - "sample-requests/put-endpoint/custom-keys/not-allowed/key-field-included.json", - }, - }, - } - - // logrus entries will be recorded to this `hook` object so we can compare and assert them hook := testLogrus.NewGlobal() - - //substitute logger exit function so execution doesn't get interrupted when log.Fatalf() call comes defer func() { logrus.StandardLogger().ExitFunc = nil }() logrus.StandardLogger().ExitFunc = func(int) {} - for _, group := range testGroups { - for _, testFile := range group.tests { - // TEST SETUP - // Read file - testInfo, err := parseTestInfo(testFile) - if !assert.NoError(t, err, "%v", err) { - continue - } + jsonTests := listJsonFiles("sample-requests/put-endpoint") + for _, testFile := range jsonTests { + var backend backends.Backend + mockMetrics := metricstest.CreateMockMetrics() + tc, backend, m, err := setupJsonTest(&mockMetrics, backend, testFile) + if !assert.NoError(t, err, "%s", testFile) { + hook.Reset() + continue + } - // Read test config - v := buildViperConfig(testInfo) - cfg := config.Configuration{} - err = v.Unmarshal(&cfg) - if !assert.NoError(t, err, "Viper could not parse configuration from test file: %s. Error:%s\n", testFile, err) { - continue - } + router := httprouter.New() + router.POST("/cache", NewPutHandler(backend, m, tc.HostConfig.MaxNumValues, tc.HostConfig.AllowSettingKeys, tc.HostConfig.RefererLogRate)) + request, err := http.NewRequest("POST", "/cache", strings.NewReader(string(tc.Request.Body))) + if !assert.NoError(t, err, "Failed to create a POST request. 
Test file: %s Error: %v", testFile, err) { + hook.Reset() + continue + } - // Instantiate memory backend, request, router, recorder - mockMetrics := metricstest.CreateMockMetrics() - m := &metrics.Metrics{ - MetricEngines: []metrics.CacheMetrics{ - &mockMetrics, - }, + if len(tc.Request.Headers) > 0 { + for header, values := range tc.Request.Headers { + for _, v := range values { + request.Header.Set(header, v) + } } + } - backend := backendConfig.NewBackend(cfg, m) - router := httprouter.New() - router.POST("/cache", NewPutHandler(backend, m, testInfo.ServerConfig.MaxNumValues, testInfo.ServerConfig.AllowSettingKeys)) - request, err := http.NewRequest("POST", "/cache", strings.NewReader(string(testInfo.PutRequest))) - assert.NoError(t, err, "Failed to create a POST request. Test file: %s Error: %v", testFile, err) - rr := httptest.NewRecorder() - - // RUN TEST - router.ServeHTTP(rr, request) - - // DO ASSERTIONS - // If error is expected, assert error message and non-200 status code - if group.expectError { - // Given that Prebid Cache still doesn't provide error details in an "errors" field describing the particular issues - // of each element that could not be stored, compare the entire response body that will contain the error message of - // the element that could not be stored. - assert.NotEqual(t, http.StatusOK, rr.Code, "Test %s failed. Expected error status code.", testFile) - assert.Equal(t, testInfo.ExpectedError, rr.Body.String(), "Error message differs from expected. Test file: %s", testFile) - } else { - // Given that no error is expected, assert a 200 status code was returned - if !assert.Equal(t, http.StatusOK, rr.Code, "Test %s failed. StatusCode = %d. 
Returned error: %s", testFile, rr.Code, rr.Body.String()) { - continue - } + rr := httptest.NewRecorder() - // Assert we returned the exact same elements in the 'Responses' array than in the request 'Puts' array - actualPutResponse := PutResponse{} - err = json.Unmarshal(rr.Body.Bytes(), &actualPutResponse) - if !assert.NoError(t, err, "Could not unmarshal %s. Test file: %s. Error:%s\n", rr.Body.String(), testFile, err) { - continue - } - assert.Len(t, actualPutResponse.Responses, len(testInfo.ExpectedResponse.Responses), "Actual response elements differ with expected. Test file: %s", testFile) - - // If custom keys are allowed, assert they are found in the actualPutResponse.Responses array - if testInfo.ServerConfig.AllowSettingKeys { - customKeyIndexes := []int{} - - // Unmarshal test request to extract custom keys - put := &putRequest{ - Puts: make([]putObject, 0), - } - err = json.Unmarshal(testInfo.PutRequest, put) - if !assert.NoError(t, err, "Could not put request %s. Test file: %s. Error:%s\n", testInfo.PutRequest, testFile, err) { - continue - } - for i, testInputElem := range put.Puts { - if len(testInputElem.Key) > 0 { - customKeyIndexes = append(customKeyIndexes, i) - } - } - - // Custom keys values must match and their position in the `actualPutResponse.Responses` array must be the exact same as they came in - // the incoming request - for _, index := range customKeyIndexes { - assert.Equal(t, testInfo.ExpectedResponse.Responses[index].UUID, actualPutResponse.Responses[index].UUID, "Custom key differs from expected in position %d. 
Test file: %s", index, testFile) - } - } - } + // RUN TEST + router.ServeHTTP(rr, request) - // Assert logrus expected entries - assertLogEntries(t, testInfo.ExpectedLogEntries, hook.Entries, testFile) + // ASSERTIONS + assert.Equal(t, tc.ExpectedOutput.Code, rr.Code, testFile) - // Reset log after every test and assert successful reset + // Assert this is a valid test that expects either an error or a PutResponse + if !assert.False(t, len(tc.ExpectedOutput.ErrorMsg) > 0 && tc.ExpectedOutput.PutOutput != nil, "%s must come with either an expected error message or an expected response", testFile) { hook.Reset() assert.Nil(t, hook.LastEntry()) + continue + } - // assert the put call above logged the expected metrics - metricstest.AssertMetrics(t, testInfo.ExpectedMetrics, mockMetrics) + // If error is expected, assert error message with the response body + if len(tc.ExpectedOutput.ErrorMsg) > 0 { + assert.Equal(t, tc.ExpectedOutput.ErrorMsg, rr.Body.String(), testFile) + } else { + // Assert we returned the exact same elements in the 'Responses' array than in the request 'Puts' array + var actualPutResponse PutResponse + err = json.Unmarshal(rr.Body.Bytes(), &actualPutResponse) + if !assert.NoError(t, err, "Could not unmarshal %s. 
Test file: %s.\n", rr.Body.String(), testFile) { + hook.Reset() + assert.Nil(t, hook.LastEntry()) + continue + } + assertResponseEntries(t, tc.ExpectedOutput.PutOutput.Responses, actualPutResponse.Responses, tc.HostConfig.AllowSettingKeys, testFile) } + + assertLogEntries(t, tc.ExpectedLogEntries, hook.Entries, testFile) + metricstest.AssertMetrics(t, tc.ExpectedMetrics, mockMetrics) + + // Reset log after every test and assert successful reset + hook.Reset() + assert.Nil(t, hook.LastEntry()) } } type testData struct { - ServerConfig testConfig `json:"serverConfig"` - PutRequest json.RawMessage `json:"putRequest"` - ExpectedResponse PutResponse `json:"expectedResponse"` - ExpectedLogEntries []logEntry `json:"expectedLogEntries"` - ExpectedError string `json:"expectedErrorMessage"` - ExpectedMetrics []string `json:"expectedMetrics"` + HostConfig hostConfig `json:"config"` + Request testRequest `json:"request"` + ExpectedOutput expectedOut `json:"expected_output"` + ExpectedLogEntries []logEntry `json:"expected_log_entries"` + ExpectedMetrics []string `json:"expected_metrics"` +} + +type testRequest struct { + Body json.RawMessage `json:"body"` + Headers map[string][]string `json:"headers"` + Query string `json:"query"` +} + +type expectedOut struct { + PutOutput *PutResponse `json:"put_response"` + GetOutput string `json:"get_response"` + Code int `json:"code"` + ErrorMsg string `json:"expected_error_message"` } type logEntry struct { @@ -208,14 +130,165 @@ type logEntry struct { Level uint32 `json:"level"` } -type testConfig struct { - AllowSettingKeys bool `json:"allow_setting_keys"` - MaxSizeBytes int `json:"max_size_bytes"` - MaxNumValues int `json:"max_num_values"` - MaxTTLSeconds int `json:"max_ttl_seconds"` +type hostConfig struct { + AllowSettingKeys bool `json:"allow_setting_keys"` + MaxSizeBytes int `json:"max_size_bytes"` + MaxNumValues int `json:"max_num_values"` + MaxTTLSeconds int `json:"max_ttl_seconds"` + FakeBackend fakeBackend `json:"fake_backend"` 
+ RefererLogRate float64 `json:"referer_sampling_rate"` +} + +type fakeBackend struct { + ErrorMsg string `json:"throw_error_message"` + ReturnBool bool `json:"throw_bool"` + StoredData []storedData `json:"stored_data"` + ServerResponse string `json:"server_response"` + Type string `json:"storage_type"` +} + +type storedData struct { + Key string `json:"key"` + Value json.RawMessage `json:"value"` +} + +// newTestBackend returns an error-prone backend when a non-empty mockCfg.ErrorMsg string +// is provided. With an empty mockCfg.ErrorMsg, it returns a well-behaved mock backend. +func newTestBackend(fb fakeBackend, ttl int) backends.Backend { + var mb backends.Backend + var backendType config.BackendType = config.BackendType(fb.Type) + + if len(fb.ErrorMsg) > 0 { + // "errorProne" backend + switch backendType { + case config.BackendCassandra: + mb = backends.NewMockCassandraBackend( + ttl, + &backends.ErrorProneCassandraClient{ + Applied: fb.ReturnBool, + ServerError: errors.New(fb.ErrorMsg), + }, + ) + case config.BackendMemcache: + mb = backends.NewMockMemcacheBackend(&backends.ErrorProneMemcache{ServerError: errors.New(fb.ErrorMsg)}) + case config.BackendAerospike: + mb = backends.NewMockAerospikeBackend( + &backends.ErrorProneAerospikeClient{ + ServerError: fb.ErrorMsg, + }, + ) + case config.BackendRedis: + var serverErr error + if fb.ErrorMsg == "redis: nil" { + serverErr = redis.Nil + } else { + serverErr = errors.New(fb.ErrorMsg) + } + mb = backends.NewFakeRedisBackend( + backends.FakeRedisClient{ + StoredData: copyStoredData(fb.StoredData), + ServerError: serverErr, + Success: fb.ReturnBool, + }, + ) + case config.BackendIgnite: + mb = backends.NewFakeIgniteBackend([]byte(fb.ServerResponse), errors.New(fb.ErrorMsg)) + default: + mb = backends.NewErrorResponseMemoryBackend() + } + return mb + } + + // Well-behaved mock backend + switch backendType { + case config.BackendCassandra: + mb = backends.NewMockCassandraBackend( + ttl, + 
&backends.GoodCassandraClient{ + StoredData: copyStoredData(fb.StoredData), + }, + ) + case config.BackendMemcache: + mb = backends.NewMockMemcacheBackend( + &backends.GoodMemcache{ + StoredData: copyStoredData(fb.StoredData), + }, + ) + case config.BackendAerospike: + mb = backends.NewMockAerospikeBackend(&backends.GoodAerospikeClient{StoredData: copyStoredData(fb.StoredData)}) + case config.BackendRedis: + mb = backends.NewFakeRedisBackend( + backends.FakeRedisClient{ + StoredData: copyStoredData(fb.StoredData), + ServerError: nil, + Success: fb.ReturnBool, + }, + ) + case config.BackendIgnite: + mb = backends.NewFakeIgniteBackend([]byte(fb.ServerResponse), nil) + default: + mb, _ = backends.NewMemoryBackendWithValues(copyStoredData(fb.StoredData)) + } + + return mb +} + +func copyStoredData(elems []storedData) map[string]string { + cpy := make(map[string]string, len(elems)) + + for _, e := range elems { + interpreted, err := unescapeXML(e.Value) + if err != nil { + return nil + } + cpy[e.Key] = interpreted + } + + return cpy +} + +func listJsonFiles(path string) []string { + files, _ := ioutil.ReadDir(path) + var jsonFiles []string + + for _, f := range files { + var newPath string + if strings.HasSuffix(f.Name(), "/") { + newPath = fmt.Sprintf("%s%s", path, f.Name()) + } else { + newPath = fmt.Sprintf("%s/%s", path, f.Name()) + } + + if f.IsDir() { + jsonFiles = append(jsonFiles, listJsonFiles(newPath)...) 
+ } else if strings.HasSuffix(newPath, ".json") { + jsonFiles = append(jsonFiles, newPath) + } + } + return jsonFiles +} + +func setupJsonTest(mockMetrics *metricstest.MockMetrics, backend backends.Backend, testFile string) (*testData, backends.Backend, *metrics.Metrics, error) { + tc, err := parseTestInfo(testFile) + if err != nil { + return nil, backend, nil, err + } + + m := &metrics.Metrics{MetricEngines: []metrics.CacheMetrics{mockMetrics}} + backend = newTestBackend(tc.HostConfig.FakeBackend, tc.HostConfig.MaxTTLSeconds) + + v := buildViperConfig(tc) + cfg := config.Configuration{} + err = v.Unmarshal(&cfg) + if err != nil { + return nil, backend, nil, errors.New(fmt.Sprintf("Viper could not parse configuration from test file: %s. Error:%s\n", testFile, err)) + } + + backend = backendConfig.DecorateBackend(cfg, m, backend) + + return tc, backend, m, nil } -// Remove this function in the future and make it part of the mock metrics to self-assert if possible. func parseTestInfo(testFile string) (*testData, error) { var jsonTest []byte var err error @@ -234,47 +307,123 @@ func buildViperConfig(testInfo *testData) *viper.Viper { v := viper.New() v.SetDefault("backend.type", "memory") v.SetDefault("compression.type", "none") - v.SetDefault("request_limits.allow_setting_keys", testInfo.ServerConfig.AllowSettingKeys) - if testInfo.ServerConfig.MaxSizeBytes == 0 { - testInfo.ServerConfig.MaxSizeBytes = 50 + v.SetDefault("request_limits.allow_setting_keys", testInfo.HostConfig.AllowSettingKeys) + if testInfo.HostConfig.MaxSizeBytes == 0 { + testInfo.HostConfig.MaxSizeBytes = 50 } - v.SetDefault("request_limits.max_size_bytes", testInfo.ServerConfig.MaxSizeBytes) + v.SetDefault("request_limits.max_size_bytes", testInfo.HostConfig.MaxSizeBytes) - if testInfo.ServerConfig.MaxNumValues == 0 { - testInfo.ServerConfig.MaxNumValues = 1 + if testInfo.HostConfig.MaxNumValues == 0 { + testInfo.HostConfig.MaxNumValues = 1 } - v.SetDefault("request_limits.max_num_values", 
testInfo.ServerConfig.MaxNumValues) - v.SetDefault("request_limits.max_ttl_seconds", testInfo.ServerConfig.MaxTTLSeconds) + v.SetDefault("request_limits.max_num_values", testInfo.HostConfig.MaxNumValues) + v.SetDefault("request_limits.max_ttl_seconds", testInfo.HostConfig.MaxTTLSeconds) return v } -// assertLogEntries asserts logrus entries with expectedLogEntries. It is a test helper function that will make a unit test fail if +// assertResponseEntries +func assertResponseEntries(t *testing.T, expectedResponses []putResponseObject, actualResponses []putResponseObject, allowSettingKeys bool, testFile string) { + assert.Len(t, actualResponses, len(expectedResponses), "Actual response elements differ with expected. Test file: %s", testFile) + + // Prebid Cache processes every element in parallel which makes for elements in + // Given the parallel nature of Prebid Cache's processing, elements in actualResponses and + // expectedResponses may not come in the exact same order. Use a map instead. + expectedUUIDs := make(map[string]int, len(expectedResponses)) + for _, resp := range expectedResponses { + expectedUUIDs[resp.UUID] += 1 + } + + // Compare output with expected entries found in map + for _, resp := range actualResponses { + // Categorize UUID + uuidOut := resp.UUID + if len(uuidOut) > 0 { + _, err := uuid.FromString(uuidOut) + if err != nil { + // Custom key. If not allowed, fail test + if !assert.True(t, allowSettingKeys, "Custom keys were not expected and UUID \"%s\" is neither random nor empty. Test file: %s.\n", resp.UUID, testFile) { + return + } + } else { + // Random keys are labeled "random" in JSON test files + uuidOut = "random" + } + } + + occurrences, found := expectedUUIDs[uuidOut] + if !assert.True(t, found, "An element in the response array with UUID \"%s\" was not expected. 
Test file: %s.\n", uuidOut, testFile) { + return + } + occurrences -= 1 + if !assert.False(t, occurrences < 0, "An element in the response array with UUID \"%s\" was not expected. Test file: %s.\n", uuidOut, testFile) { + return + } + if occurrences == 0 { + delete(expectedUUIDs, uuidOut) + } else { + expectedUUIDs[uuidOut] = occurrences + } + } + // Do we need this? + for nonAccountedUUID := range expectedUUIDs { + assert.Fail(t, "UUID \"%s\" was expected and not found in the response body. Test file: %s.\n", nonAccountedUUID, testFile) + } +} + +// assertLogEntries asserts logrus entries with expectedLogEntries. It is a test helper function that makes a unit test fail if // expected values are not found func assertLogEntries(t *testing.T, expectedLogEntries []logEntry, actualLogEntries []logrus.Entry, testFile string) { t.Helper() - assert.Equal(t, len(expectedLogEntries), len(actualLogEntries), "Incorrect number of entries were logged to logrus in test %s: len(expectedLogEntries) = %d, len(actualLogEntries) = %d", testFile, len(expectedLogEntries), len(actualLogEntries)) - for i := 0; i < len(actualLogEntries); i++ { - assert.Equal(t, expectedLogEntries[i].Message, actualLogEntries[i].Message, "Test case %s log message differs", testFile) - assert.Equal(t, expectedLogEntries[i].Level, uint32(actualLogEntries[i].Level), "Test case %s log level differs", testFile) + if assert.Equal(t, len(expectedLogEntries), len(actualLogEntries), "Incorrect number of entries were logged to logrus in test %s. Actual log entries:\n %v", testFile, actualLogEntries) { + for i := 0; i < len(actualLogEntries); i++ { + assert.Equal(t, expectedLogEntries[i].Message, actualLogEntries[i].Message, "Test case %s log message differs", testFile) + assert.Equal(t, expectedLogEntries[i].Level, uint32(actualLogEntries[i].Level), "Test case %s log level differs", testFile) + } } } // TestStatusEndpointReadiness asserts the http:///status endpoint // is responds as expected. 
func TestStatusEndpointReadiness(t *testing.T) { - // Set up - requestRecorder := httptest.NewRecorder() + type testCase struct { + description string + handler httprouter.Handle + expectedRespCode int + expectedRespBody *bytes.Buffer + } - router := httprouter.New() - router.GET("/status", Status) - req, _ := http.NewRequest("GET", "/status", new(bytes.Buffer)) + testCases := []testCase{ + { + description: "Empty response", + handler: NewStatusEndpoint(""), + expectedRespCode: http.StatusNoContent, + expectedRespBody: bytes.NewBuffer(nil), + }, + { + description: "string response", + handler: NewStatusEndpoint("ready"), + expectedRespCode: http.StatusOK, + expectedRespBody: bytes.NewBuffer([]byte("ready")), + }, + { + description: "JSON string response", + handler: NewStatusEndpoint(`{"status": "ok"}`), + expectedRespCode: http.StatusOK, + expectedRespBody: bytes.NewBuffer([]byte(`{"status": "ok"}`)), + }, + } - // Run - router.ServeHTTP(requestRecorder, req) + for _, tc := range testCases { + router := httprouter.New() + requestRecorder := httptest.NewRecorder() + router.GET("/status", tc.handler) + req, _ := http.NewRequest("GET", "/status", new(bytes.Buffer)) + router.ServeHTTP(requestRecorder, req) + assert.Equal(t, tc.expectedRespCode, requestRecorder.Code, "/status endpoint returned unexpected response", tc.description) + assert.Equal(t, tc.expectedRespBody, requestRecorder.Body, "/status returned unexpected response body", tc.description) + } - // Assert - assert.Equal(t, http.StatusNoContent, requestRecorder.Code, "/status endpoint should always return a 204. 
Got %d", requestRecorder.Code) } // TestSuccessfulPut asserts the *PuntHandler.handle() function both successfully @@ -417,8 +566,8 @@ func TestSuccessfulPut(t *testing.T) { }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) - router.GET("/cache", NewGetHandler(backend, m, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) + router.GET("/cache", NewGetHandler(backend, m, true, 0.0)) // Feed the tests input put request to the endpoint's handle putResponse := doPut(t, router, tc.inPutBody) @@ -507,7 +656,7 @@ func TestMalformedOrInvalidValue(t *testing.T) { }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) // Run test putResponse := doPut(t, router, tc.inPutBody) @@ -543,7 +692,7 @@ func TestNonSupportedType(t *testing.T) { &mockMetrics, }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) putResponse := doPut(t, router, requestBody) @@ -579,7 +728,7 @@ func TestPutNegativeTTL(t *testing.T) { }, } - testRouter.POST("/cache", NewPutHandler(testBackend, m, 10, true)) + testRouter.POST("/cache", NewPutHandler(testBackend, m, 10, true, 0.0)) recorder := httptest.NewRecorder() @@ -661,11 +810,18 @@ func TestCustomKey(t *testing.T) { }, } + preExistentDataInBackend := map[string]string{ + "non-36-char-key-maps-to-json": `json{"field":"value"}`, + "36-char-key-maps-to-non-xml-nor-json": `#@!*{"desc":"data got malformed and is not prefixed with 'xml' nor 'json' substring"}`, + "36-char-key-maps-to-actual-xml-value": "xmlxml data here", + } + for _, tgroup := range testGroups { for _, tc := range tgroup.testCases { // Instantiate prebid cache prod server with mock metrics and a mock metrics that // already contains some values - mockBackendWithValues := newMockBackend() + mockBackendWithValues, err := backends.NewMemoryBackendWithValues(preExistentDataInBackend) + assert.NoError(t, 
err, "Mock backend could not be created") mockMetrics := metricstest.CreateMockMetrics() m := &metrics.Metrics{ MetricEngines: []metrics.CacheMetrics{ @@ -674,7 +830,7 @@ func TestCustomKey(t *testing.T) { } router := httprouter.New() - putEndpointHandler := NewPutHandler(mockBackendWithValues, m, 10, tgroup.allowSettingKeys) + putEndpointHandler := NewPutHandler(mockBackendWithValues, m, 10, tgroup.allowSettingKeys, 0.0) router.POST("/cache", putEndpointHandler) recorder := httptest.NewRecorder() @@ -704,14 +860,14 @@ func TestCustomKey(t *testing.T) { func TestRequestReadError(t *testing.T) { // Setup server and mock body request reader - mockBackendWithValues := newMockBackend() + mockBackendWithValues, _ := backends.NewMemoryBackendWithValues(nil) mockMetrics := metricstest.CreateMockMetrics() m := &metrics.Metrics{ MetricEngines: []metrics.CacheMetrics{ &mockMetrics, }, } - putEndpointHandler := NewPutHandler(mockBackendWithValues, m, 10, false) + putEndpointHandler := NewPutHandler(mockBackendWithValues, m, 10, false, 0.0) router := httprouter.New() router.POST("/cache", putEndpointHandler) @@ -756,7 +912,7 @@ func TestTooManyPutElements(t *testing.T) { &mockMetrics, }, } - router.POST("/cache", NewPutHandler(backend, m, len(putElements)-1, true)) + router.POST("/cache", NewPutHandler(backend, m, len(putElements)-1, true, 0.0)) putResponse := doPut(t, router, reqBody) @@ -811,8 +967,8 @@ func TestMultiPutRequest(t *testing.T) { }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) - router.GET("/cache", NewGetHandler(backend, m, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) + router.GET("/cache", NewGetHandler(backend, m, true, 0.0)) rr := httptest.NewRecorder() @@ -862,7 +1018,7 @@ func TestBadPayloadSizePutError(t *testing.T) { &mockMetrics, }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) putResponse := doPut(t, router, reqBody) @@ 
-899,7 +1055,7 @@ func TestInternalPutClientError(t *testing.T) { // Use mock client that will return an error backendWithMetrics := decorators.LogMetrics(newErrorReturningBackend(), m) - router.POST("/cache", NewPutHandler(backendWithMetrics, m, 10, true)) + router.POST("/cache", NewPutHandler(backendWithMetrics, m, 10, true, 0.0)) // Run test putResponse := doPut(t, router, reqBody) @@ -973,7 +1129,7 @@ func TestEmptyPutRequests(t *testing.T) { }, } router := httprouter.New() - router.POST("/cache", NewPutHandler(backend, m, 10, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) rr := httptest.NewRecorder() // Create request everytime @@ -1016,7 +1172,7 @@ func TestPutClientDeadlineExceeded(t *testing.T) { &mockMetrics, }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) putResponse := doPut(t, router, reqBody) @@ -1312,8 +1468,8 @@ func benchmarkPutHandler(b *testing.B, testCase string) { }, } - router.POST("/cache", NewPutHandler(backend, m, 10, true)) - router.GET("/cache", NewGetHandler(backend, m, true)) + router.POST("/cache", NewPutHandler(backend, m, 10, true, 0.0)) + router.GET("/cache", NewGetHandler(backend, m, true, 0.0)) rr := httptest.NewRecorder() @@ -1325,16 +1481,6 @@ func benchmarkPutHandler(b *testing.B, testCase string) { } } -func newMockBackend() *backends.MemoryBackend { - backend := backends.NewMemoryBackend() - - backend.Put(context.Background(), "non-36-char-key-maps-to-json", `json{"field":"value"}`, 0) - backend.Put(context.Background(), "36-char-key-maps-to-non-xml-nor-json", `#@!*{"desc":"data got malformed and is not prefixed with 'xml' nor 'json' substring"}`, 0) - backend.Put(context.Background(), "36-char-key-maps-to-actual-xml-value", "xmlxml data here", 0) - - return backend -} - type faultyRequestBodyReader struct { mock.Mock } diff --git a/endpoints/routing/handler.go b/endpoints/routing/handler.go index 7e2a7c2c..c5141ef5 
100644 --- a/endpoints/routing/handler.go +++ b/endpoints/routing/handler.go @@ -35,14 +35,14 @@ func NewPublicHandler(cfg config.Configuration, dataStore backends.Backend, appM } func addReadRoutes(cfg config.Configuration, dataStore backends.Backend, appMetrics *metrics.Metrics, router *httprouter.Router) { - router.GET("/", endpoints.NewIndexHandler(cfg.IndexResponse)) // Default route handler - router.GET("/status", endpoints.Status) // Determines whether the server is ready for more traffic. - router.GET("/cache", endpoints.NewGetHandler(dataStore, appMetrics, cfg.RequestLimits.AllowSettingKeys)) + router.GET("/", endpoints.NewIndexHandler(cfg.IndexResponse)) // Default route handler + router.GET("/status", endpoints.NewStatusEndpoint(cfg.StatusResponse)) // Determines whether the server is ready for more traffic. + router.GET("/cache", endpoints.NewGetHandler(dataStore, appMetrics, cfg.RequestLimits.AllowSettingKeys, cfg.RequestLogging.RefererSamplingRate)) router.GET("/version", endpoints.NewVersionEndpoint(version.Ver, version.Rev)) } func addWriteRoutes(cfg config.Configuration, dataStore backends.Backend, appMetrics *metrics.Metrics, router *httprouter.Router) { - router.POST("/cache", endpoints.NewPutHandler(dataStore, appMetrics, cfg.RequestLimits.MaxNumValues, cfg.RequestLimits.AllowSettingKeys)) + router.POST("/cache", endpoints.NewPutHandler(dataStore, appMetrics, cfg.RequestLimits.MaxNumValues, cfg.RequestLimits.AllowSettingKeys, cfg.RequestLogging.RefererSamplingRate)) } func handleCors(handler http.Handler) http.Handler { diff --git a/endpoints/sample-requests/get-endpoint/invalid/data-corrupted.json b/endpoints/sample-requests/get-endpoint/invalid/data-corrupted.json new file mode 100644 index 00000000..ec3a048a --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/invalid/data-corrupted.json @@ -0,0 +1,32 @@ +{ + "description": "Stored data 'value' field is not prefixed with 'json' nor 'xml', which we don't support. 
Expect error", + "config": { + "fake_backend": { + "stored_data": [ + { + "key": "36-char-uid-maps-to-actual-xml-value", + "value": "Not prefixed with 'json' nor 'xml' keywords" + } + ] + } + }, + "request": { + "query": "uuid=36-char-uid-maps-to-actual-xml-value" + }, + "expected_log_entries": [ + { + "message": "GET /cache uuid=36-char-uid-maps-to-actual-xml-value: Cache data was corrupted. Cannot determine type.", + "level": 2 + } + ], + "expected_metrics": [ + "RecordGetTotal", + "RecordGetError", + "RecordGetBackendDuration", + "RecordGetBackendTotal" + ], + "expected_output": { + "code": 500, + "expected_error_message": "GET /cache uuid=36-char-uid-maps-to-actual-xml-value: Cache data was corrupted. Cannot determine type.\n" + } +} diff --git a/endpoints/sample-requests/get-endpoint/invalid/key-not-found.json b/endpoints/sample-requests/get-endpoint/invalid/key-not-found.json new file mode 100644 index 00000000..0d83ad0e --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/invalid/key-not-found.json @@ -0,0 +1,27 @@ +{ + "description": "Get request comes with a UUID that is not stored in the backend, expect KEY_NOT_FOUND error", + "config": { + "fake_backend": { + "stored_data": [ + { + "key": "36-char-uid-maps-to-actual-xml-value", + "value": "json{\"content\":5}" + } + ] + } + }, + "request": { + "query": "uuid=36-char-uuid-is-not-found-in-backend" + }, + "expected_metrics": [ + "RecordGetTotal", + "RecordGetBackendError", + "RecordGetBackendTotal", + "RecordKeyNotFoundError", + "RecordGetBadRequest" + ], + "expected_output": { + "code": 404, + "expected_error_message": "GET /cache uuid=36-char-uuid-is-not-found-in-backend: Key not found\n" + } +} diff --git a/endpoints/sample-requests/get-endpoint/invalid/missing-uuid.json b/endpoints/sample-requests/get-endpoint/invalid/missing-uuid.json new file mode 100644 index 00000000..5b8723a9 --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/invalid/missing-uuid.json @@ -0,0 +1,30 @@ +{ + "description": 
"Get request doesn't come with a UUID value in the URL query, expect MISSING_KEY error", + "config": { + "fake_backend": { + "stored_data": [ + { + "key": "36-char-uid-maps-to-actual-xml-value", + "value": "json{\"content\":5}" + } + ] + } + }, + "request": { + "query": "uuid=" + }, + "expected_log_entries": [ + { + "message": "GET /cache: Missing required parameter uuid", + "level": 2 + } + ], + "expected_metrics": [ + "RecordGetTotal", + "RecordGetBadRequest" + ], + "expected_output": { + "code": 400, + "expected_error_message": "GET /cache: Missing required parameter uuid\n" + } +} diff --git a/endpoints/sample-requests/get-endpoint/invalid/uuid-length.json b/endpoints/sample-requests/get-endpoint/invalid/uuid-length.json new file mode 100644 index 00000000..b7b71f84 --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/invalid/uuid-length.json @@ -0,0 +1,20 @@ +{ + "description": "Get request comes with a UUID value that is not 36 characters long, expect invalid uuid length error", + "request": { + "query": "uuid=non-36-char-uuid" + }, + "expected_log_entries": [ + { + "message": "GET /cache uuid=non-36-char-uuid: invalid uuid length", + "level": 2 + } + ], + "expected_metrics": [ + "RecordGetTotal", + "RecordGetBadRequest" + ], + "expected_output": { + "code": 404, + "expected_error_message": "GET /cache uuid=non-36-char-uuid: invalid uuid length\n" + } +} diff --git a/endpoints/sample-requests/get-endpoint/valid/element-found.json b/endpoints/sample-requests/get-endpoint/valid/element-found.json new file mode 100644 index 00000000..c63451bb --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/valid/element-found.json @@ -0,0 +1,26 @@ +{ + "description": "Get request successfully returns stored data", + "config": { + "fake_backend": { + "stored_data": [ + { + "key": "36-char-uid-maps-to-stored-xml-value", + "value": "xmlstored xml value" + } + ] + } + }, + "request": { + "query": "uuid=36-char-uid-maps-to-stored-xml-value" + }, + "expected_metrics": [ + 
"RecordGetBackendTotal", + "RecordGetDuration", + "RecordGetBackendDuration", + "RecordGetTotal" + ], + "expected_output": { + "code": 200, + "get_response": "stored xml value" + } +} diff --git a/endpoints/sample-requests/get-endpoint/valid/log-referrer-header.json b/endpoints/sample-requests/get-endpoint/valid/log-referrer-header.json new file mode 100644 index 00000000..ebc5637a --- /dev/null +++ b/endpoints/sample-requests/get-endpoint/valid/log-referrer-header.json @@ -0,0 +1,36 @@ +{ + "description": "Prebid Cache configured to log the referer header of 100% of incoming requests. Referer successfully logged.", + "config": { + "fake_backend": { + "stored_data": [ + { + "key": "36-char-uid-maps-to-stored-xml-value", + "value": "xmlstored xml value" + } + ] + }, + "referer_sampling_rate": 1.0 + }, + "request": { + "query": "uuid=36-char-uid-maps-to-stored-xml-value", + "headers": { + "Referer": [ "anyreferer" ] + } + }, + "expected_log_entries": [ + { + "message": "GET request Referer header: anyreferer", + "level": 4 + } + ], + "expected_metrics": [ + "RecordGetBackendTotal", + "RecordGetDuration", + "RecordGetBackendDuration", + "RecordGetTotal" + ], + "expected_output": { + "code": 200, + "get_response": "stored xml value" + } +} diff --git a/endpoints/sample-requests/put-endpoint/backends/ignite/record-exists-error.json b/endpoints/sample-requests/put-endpoint/backends/ignite/record-exists-error.json new file mode 100644 index 00000000..68035f67 --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/backends/ignite/record-exists-error.json @@ -0,0 +1,31 @@ +{ + "description": "Ignite's way of telling us there a key already exists is to return a response field with a value of false. 
Expect a RECORD_EXISTS error", + "config": { + "fake_backend": { + "storage_type": "ignite", + "server_response": "{\"successStatus\":0,\"error\":\"\",\"response\":false}" + } + }, + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "xmlanother_XML" + } + ] + } + }, + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendXml", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendError", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "expected_error_message": "{\"responses\":[{\"uuid\":\"\"}]}" + } +} diff --git a/endpoints/sample-requests/put-endpoint/backends/redis/invalid-redis-server-error.json b/endpoints/sample-requests/put-endpoint/backends/redis/invalid-redis-server-error.json new file mode 100644 index 00000000..bea87afd --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/backends/redis/invalid-redis-server-error.json @@ -0,0 +1,41 @@ +{ + "description": "Redis server side error. Expect 500, an both an error message and a log entry", + "config": { + "fake_backend": { + "storage_type": "redis", + "throw_error_message": "Redis server side error" + } + }, + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "xmlanother_XML" + } + ] + } + }, + "expected_log_entries": [ + { + "message": "POST /cache Error while writing to the back-end: Redis server side error", + "level": 2 + }, + { + "message": "POST /cache had an unexpected error:Redis server side error", + "level": 2 + } + ], + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendXml", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendError", + "RecordPutError" + ], + "expected_output": { + "code": 500, + "expected_error_message": "Redis server side error\n" + } +} diff --git a/endpoints/sample-requests/put-endpoint/backends/redis/valid-overwrite.json b/endpoints/sample-requests/put-endpoint/backends/redis/valid-overwrite.json new file mode 100644 index 00000000..dc29e502 --- 
/dev/null +++ b/endpoints/sample-requests/put-endpoint/backends/redis/valid-overwrite.json @@ -0,0 +1,45 @@ +{ + "description": "Put request tries to overwrite data stored in Redis. Expect value to not be overwritten and a blank UUID in response", + "config": { + "allow_setting_keys": true, + "storage_type": "redis", + "fake_backend": { + "stored_data": [ + { + "key": "the-custom-thirty-six-character-uuid", + "value": "original_XML" + } + ] + } + }, + "request": { + "body": { + "puts": [ + { + "key": "the-custom-thirty-six-character-uuid", + "type": "xml", + "value": "NEW_XML" + } + ] + } + }, + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendXml", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendError", + "RecordPutKeyProvided", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "" + } + ] + } + } +} diff --git a/endpoints/sample-requests/put-endpoint/backends/redis/valid-success-true.json b/endpoints/sample-requests/put-endpoint/backends/redis/valid-success-true.json new file mode 100644 index 00000000..bcd4acdb --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/backends/redis/valid-success-true.json @@ -0,0 +1,38 @@ +{ + "description": "Successful insertion of element in fake redis backend", + "config": { + "fake_backend": { + "storage_type": "redis", + "throw_bool": true, + "throw_error_message": "redis: nil" + } + }, + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "info<\\/VAST>\r\n" + } + ] + } + }, + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendXml", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendDuration", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } + } +} diff --git a/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-included.json 
b/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-included.json index f0a22740..7381079a 100644 --- a/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-included.json +++ b/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-included.json @@ -1,19 +1,20 @@ { "description": "Prebid Cache has been configured to allow to store elements under custom keys. Store data under element-defined 'key' value.", - "serverConfig": { + "config": { "allow_setting_keys": true }, - "putRequest": { - "puts":[ - { - "type":"xml", - "ttlseconds":60, - "value":"other_XML_content", - "key":"the-custom-thrity-six-character-uuid" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "other_XML_content", + "key": "the-custom-thirty-six-character-uuid" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutKeyProvided", "RecordPutBackendXml", @@ -22,9 +23,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "the-custom-thrity-six-character-uuid"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "the-custom-thirty-six-character-uuid" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-missing.json b/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-missing.json index b31b56ee..b34f6583 100644 --- a/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-missing.json +++ b/endpoints/sample-requests/put-endpoint/custom-keys/allowed/key-field-missing.json @@ -1,18 +1,19 @@ { "description": "Prebid Cache has been configured to allow to store elements under custom keys but element came with empty 'key' field. 
Store under random UUID", - "serverConfig": { + "config": { "allow_setting_keys": true }, - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 60, - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -20,9 +21,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/custom-keys/not-allowed/key-field-included.json b/endpoints/sample-requests/put-endpoint/custom-keys/not-allowed/key-field-included.json index 68894318..d9f4b3e1 100644 --- a/endpoints/sample-requests/put-endpoint/custom-keys/not-allowed/key-field-included.json +++ b/endpoints/sample-requests/put-endpoint/custom-keys/not-allowed/key-field-included.json @@ -1,16 +1,17 @@ { "description": "Put request wants to store element under a custom key but custom keys are not allowed in Prebid Cache's config. 
Store under a random UUID", - "putRequest": { - "puts":[ - { - "type":"xml", - "ttlseconds":60, - "value":"other_XML_content", - "key":"the-custom-thrity-six-character-uuid" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "other_XML_content", + "key": "the-custom-thirty-six-character-uuid" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -18,9 +19,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/invalid-number-of-elements/puts-max-num-values.json b/endpoints/sample-requests/put-endpoint/invalid-number-of-elements/puts-max-num-values.json index ef438a08..0907c834 100644 --- a/endpoints/sample-requests/put-endpoint/invalid-number-of-elements/puts-max-num-values.json +++ b/endpoints/sample-requests/put-endpoint/invalid-number-of-elements/puts-max-num-values.json @@ -1,17 +1,30 @@ { "description": "Put request wants to store more elements than allowed in the 'max_num_values' configuration. 
Don't store and return error", - "serverConfig": { - "max_num_values": 1 + "config": { + "max_num_values": 1 }, - "putRequest": { - "puts": [ - {"type":"xml","ttlseconds":5,"value":"__video_info__<\\/VAST>\r\n"}, - {"type":"json","ttlseconds":5,"value":"{\"field\":100}"} - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "ttlseconds": 5, + "value": "__video_info__<\\/VAST>\r\n" + }, + { + "type": "json", + "ttlseconds": 5, + "value": "{\"field\":100}" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBadRequest" ], - "expectedErrorMessage": "More keys than allowed: 1\n" + "expected_output": { + "code": 400, + "expected_error_message": "More keys than allowed: 1\n" + } } diff --git a/endpoints/sample-requests/put-endpoint/invalid-types/type-missing.json b/endpoints/sample-requests/put-endpoint/invalid-types/type-missing.json index 9db3f721..95f931a3 100644 --- a/endpoints/sample-requests/put-endpoint/invalid-types/type-missing.json +++ b/endpoints/sample-requests/put-endpoint/invalid-types/type-missing.json @@ -1,14 +1,15 @@ { - "description": "Prebid Cache only allows to store JSON or XML types and the type field is required. Respond with error", - "putRequest": { - "puts": [ - { - "ttlseconds": 60, - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "description": "Request is missing the 'type' field. Respond with error", + "request": { + "body": { + "puts": [ + { + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedLogEntries": [ + "expected_log_entries": [ { "message": "POST /cache Error while writing to the back-end: Type must be one of [\"json\", \"xml\"]. Found ''", "level": 2 @@ -18,9 +19,12 @@ "level": 2 } ], - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBadRequest" ], - "expectedErrorMessage": "Type must be one of [\"json\", \"xml\"]. Found ''\n" + "expected_output": { + "code": 400, + "expected_error_message": "Type must be one of [\"json\", \"xml\"]. 
Found ''\n" + } } diff --git a/endpoints/sample-requests/put-endpoint/invalid-types/type-unknown.json b/endpoints/sample-requests/put-endpoint/invalid-types/type-unknown.json index 790b06fb..79a13ca6 100644 --- a/endpoints/sample-requests/put-endpoint/invalid-types/type-unknown.json +++ b/endpoints/sample-requests/put-endpoint/invalid-types/type-unknown.json @@ -1,15 +1,16 @@ { "description": "Prebid Cache only allows to store JSON or XML types and the type 'unknown' is not supported. Respond with error", - "putRequest": { - "puts": [ - { - "type": "unknown", - "ttlseconds": 60, - "value": "some-value" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "unknown", + "value": "some-value" + } + ] + } }, - "expectedLogEntries": [ + "expected_log_entries": [ { "message": "POST /cache Error while writing to the back-end: Type must be one of [\"json\", \"xml\"]. Found 'unknown'", "level": 2 @@ -19,9 +20,12 @@ "level": 2 } ], - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBadRequest" ], - "expectedErrorMessage": "Type must be one of [\"json\", \"xml\"]. Found 'unknown'\n" + "expected_output": { + "code": 400, + "expected_error_message": "Type must be one of [\"json\", \"xml\"]. Found 'unknown'\n" + } } diff --git a/endpoints/sample-requests/put-endpoint/invalid-value/value-greater-than-max.json b/endpoints/sample-requests/put-endpoint/invalid-value/value-greater-than-max.json index 9acd14a9..abbe9b5b 100644 --- a/endpoints/sample-requests/put-endpoint/invalid-value/value-greater-than-max.json +++ b/endpoints/sample-requests/put-endpoint/invalid-value/value-greater-than-max.json @@ -1,28 +1,29 @@ { "description": "Put request wants to store an element with a size that exceeds the 'max_size_bytes' value. 
Don't store and return error", - "serverConfig": { - "max_size_bytes": 5 + "config": { + "max_size_bytes": 1 }, - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 5, - "value": "\r\n<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "\r\n<\\/VAST>\r\n" + } + ] + } }, - "expectedLogEntries": [ + "expected_log_entries": [ { - "message": "POST /cache Error while writing to the back-end: POST /cache element 0 exceeded max size: Payload size 73 exceeded max 5", + "message": "POST /cache Error while writing to the back-end: POST /cache element 0 exceeded max size: Payload size 73 exceeded max 1", "level": 2 }, { - "message": "POST /cache had an unexpected error:POST /cache element 0 exceeded max size: Payload size 73 exceeded max 5", + "message": "POST /cache had an unexpected error:POST /cache element 0 exceeded max size: Payload size 73 exceeded max 1", "level": 2 } ], - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -30,5 +31,8 @@ "RecordPutBackendError", "RecordPutBadRequest" ], - "expectedErrorMessage": "POST /cache element 0 exceeded max size: Payload size 73 exceeded max 5\n" + "expected_output": { + "code": 400, + "expected_error_message": "POST /cache element 0 exceeded max size: Payload size 73 exceeded max 1\n" + } } diff --git a/endpoints/sample-requests/put-endpoint/invalid-value/value-missing.json b/endpoints/sample-requests/put-endpoint/invalid-value/value-missing.json index 9523215c..8325a6a6 100644 --- a/endpoints/sample-requests/put-endpoint/invalid-value/value-missing.json +++ b/endpoints/sample-requests/put-endpoint/invalid-value/value-missing.json @@ -1,14 +1,15 @@ { - "description": "Prebid Cache does not allow the storage of empty values. 
Respond with error", - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 60 - } - ] + "description": "Prebid Cache returns an error if a request doesn't come with a 'value' field.", + "request": { + "body": { + "puts": [ + { + "type": "xml" + } + ] + } }, - "expectedLogEntries": [ + "expected_log_entries": [ { "message": "POST /cache Error while writing to the back-end: Missing value.", "level": 2 @@ -18,9 +19,12 @@ "level": 2 } ], - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBadRequest" ], - "expectedErrorMessage": "Missing value.\n" + "expected_output": { + "code": 400, + "expected_error_message": "Missing value.\n" + } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/log-referrer-header.json b/endpoints/sample-requests/put-endpoint/valid-whole/log-referrer-header.json new file mode 100644 index 00000000..e1c679cb --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/valid-whole/log-referrer-header.json @@ -0,0 +1,43 @@ +{ + "description": "Prebid Cache configured to log the referer header of 100% of incoming requests. 
Referer successfully logged.", + "config": { + "referer_sampling_rate": 1.0 + }, + "request": { + "body": { + "puts": [ + { + "type": "json", + "value": "{\"field\":100}" + } + ] + }, + "headers": { + "Referer": [ "anyreferer" ] + } + }, + "expected_log_entries": [ + { + "message": "POST request Referer header: anyreferer", + "level": 4 + } + ], + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendJson", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendDuration", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } + } +} diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/multiple-elements-to-store.json b/endpoints/sample-requests/put-endpoint/valid-whole/multiple-elements-to-store.json index 407d5848..c0b9ad12 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/multiple-elements-to-store.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/multiple-elements-to-store.json @@ -1,23 +1,25 @@ { - "description": "Put request wants to store multiple elements but no more than the maximum allowed by the 'max_num_values' config. 
Store them under a random UUIDs", - "serverConfig": { + "description": "Put request stores multiple elements without going over the 'max_num_values' cap of its config.", + "config": { "max_num_values": 2 }, - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 60, - "value": "__video_info__<\\/VAST>\r\n" - }, - { - "type": "json", - "ttlseconds": 60, - "value": "{\"an_int_field\": 1}" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "ttlseconds": 60, + "value": "__video_info__<\\/VAST>\r\n" + }, + { + "type": "json", + "ttlseconds": 60, + "value": "{\"an_int_field\": 1}" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendJson", @@ -26,10 +28,17 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"}, - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + }, + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/no-elements-to-store.json b/endpoints/sample-requests/put-endpoint/valid-whole/no-elements-to-store.json index f5e20835..bf2600f9 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/no-elements-to-store.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/no-elements-to-store.json @@ -1,13 +1,18 @@ { "description": "Put request with empty 'puts' array does not return an error, we simply respond with an emtpy 'responses' array.", - "putRequest": { - "puts": [] + "request": { + "body": { + "puts": [] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/record-exists.json 
b/endpoints/sample-requests/put-endpoint/valid-whole/record-exists.json new file mode 100644 index 00000000..8130ec87 --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/valid-whole/record-exists.json @@ -0,0 +1,44 @@ +{ + "description": "Prebid Cache doesn't allow stored entries to be rewritten. Return a non-error response body with blank 'uuid' value", + "config": { + "allow_setting_keys": true, + "fake_backend": { + "stored_data": [ + { + "key": "the-custom-thirty-six-character-uuid", + "value": "original_XML" + } + ] + } + }, + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "updated_XML", + "key": "the-custom-thirty-six-character-uuid" + } + ] + } + }, + "expected_metrics": [ + "RecordPutTotal", + "RecordPutKeyProvided", + "RecordPutBackendXml", + "RecordPutBackendTTLSeconds", + "RecordPutBackendError", + "RecordPutBackendSize", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "" + } + ] + } + } +} diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/single-element-to-store.json b/endpoints/sample-requests/put-endpoint/valid-whole/single-element-to-store.json index 9d541889..a103dbbc 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/single-element-to-store.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/single-element-to-store.json @@ -1,15 +1,16 @@ { "description": "Put request wants to store a single element of valid type no larger than the maximum size allowed. 
Store under a random UUID", - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 60, - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -17,9 +18,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/ttl-missing.json b/endpoints/sample-requests/put-endpoint/valid-whole/ttl-missing.json index a25e896a..f5cfd4d8 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/ttl-missing.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/ttl-missing.json @@ -1,14 +1,16 @@ { "description": "Object to store doesn't come with a time-to-live value. 
Prebid Cache allows for a zero time-to-live value and responds with a random UUID.", - "putRequest": { - "puts": [ - { - "type": "xml", - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -16,9 +18,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/ttl-more-than-max.json b/endpoints/sample-requests/put-endpoint/valid-whole/ttl-more-than-max.json index 5e51d947..5ecca6f9 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/ttl-more-than-max.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/ttl-more-than-max.json @@ -1,18 +1,20 @@ { "description": "Put request wants to store object for more seconds than Prebid Cache maximum. 
Cap at the 'max_ttl_seconds' value and store successfully", - "serverConfig": { + "config": { "max_ttl_seconds": 5 }, - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 6, - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "ttlseconds": 6, + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -20,9 +22,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/uuid-scenarios-in-response.json b/endpoints/sample-requests/put-endpoint/valid-whole/uuid-scenarios-in-response.json new file mode 100644 index 00000000..b1d9a5a7 --- /dev/null +++ b/endpoints/sample-requests/put-endpoint/valid-whole/uuid-scenarios-in-response.json @@ -0,0 +1,68 @@ +{ + "description": "Put request wants to store multiple elements but no more than the maximum allowed by the 'max_num_values' config. 
Store them under a random UUIDs", + "config": { + "max_num_values": 3, + "max_size_bytes": 100, + "fake_backend": { + "stored_data": [ + { + "key": "uuid-stored-value-we-want-overwriten", + "value": "original_XML", + "type": "xml", + "ttlseconds": 60 + } + ] + }, + "allow_setting_keys": true + }, + "request": { + "body": { + "puts": [ + { + "type": "json", + "ttlseconds": 60, + "value": "{\"description\": \"value will be stored under random UUID\"}" + }, + { + "key": "the-custom-thirty-six-character-uuid", + "type": "json", + "ttlseconds": 60, + "value": "{\"description\": \"value will be stored under custom UUID\"}" + }, + { + "key": "uuid-stored-value-we-want-overwriten", + "value": "XML meant to overwrite data already stored under this key", + "type": "xml", + "ttlseconds": 60 + } + ] + } + }, + "expected_metrics": [ + "RecordPutTotal", + "RecordPutBackendXml", + "RecordPutBackendJson", + "RecordPutBackendSize", + "RecordPutBackendTTLSeconds", + "RecordPutBackendDuration", + "RecordPutKeyProvided", + "RecordPutBackendError", + "RecordPutDuration" + ], + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + }, + { + "uuid": "the-custom-thirty-six-character-uuid" + }, + { + "uuid": "" + } + ] + } + } +} diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-json.json b/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-json.json index a6c0fff5..1faae71d 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-json.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-json.json @@ -1,15 +1,16 @@ { "description": "Store JSON type value, which Prebid Cache allows. 
Store under a random UUID", - "putRequest": { - "puts": [ - { - "type": "json", - "ttlseconds": 60, - "value": "{\"field\":100}" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "json", + "value": "{\"field\":100}" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendJson", "RecordPutBackendSize", @@ -17,9 +18,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-xml.json b/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-xml.json index a7b62b70..9b218e5c 100644 --- a/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-xml.json +++ b/endpoints/sample-requests/put-endpoint/valid-whole/valid-type-xml.json @@ -1,15 +1,16 @@ { "description": "Prebid Cache allows the storage of XML type values. 
Store under a random UUID because the 'key' field missing and custom keys are not allowed anyways", - "putRequest": { - "puts": [ - { - "type": "xml", - "ttlseconds": 60, - "value": "__video_info__<\\/VAST>\r\n" - } - ] + "request": { + "body": { + "puts": [ + { + "type": "xml", + "value": "__video_info__<\\/VAST>\r\n" + } + ] + } }, - "expectedMetrics": [ + "expected_metrics": [ "RecordPutTotal", "RecordPutBackendXml", "RecordPutBackendSize", @@ -17,9 +18,14 @@ "RecordPutBackendDuration", "RecordPutDuration" ], - "expectedResponse": { - "responses": [ - {"uuid": "random"} - ] + "expected_output": { + "code": 200, + "put_response": { + "responses": [ + { + "uuid": "random" + } + ] + } } } diff --git a/endpoints/status.go b/endpoints/status.go index 3186f75b..9c95224f 100644 --- a/endpoints/status.go +++ b/endpoints/status.go @@ -6,9 +6,17 @@ import ( "github.com/julienschmidt/httprouter" ) -// Status is the handler function of the "/status" endpoint -func Status(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - // We might want more logic here eventually... but for now, we're ok to serve more traffic as - // long as the server responds. - w.WriteHeader(http.StatusNoContent) +// NewStatusEndpoint returns a handler which writes the given response when the app is ready to serve requests. +func NewStatusEndpoint(response string) httprouter.Handle { + // Today, the app always considers itself ready to serve requests. 
+ if response == "" { + return func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { + w.WriteHeader(http.StatusNoContent) + } + } + + responseBytes := []byte(response) + return func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { + w.Write(responseBytes) + } } diff --git a/go.mod b/go.mod index c3b710c3..cd8fdb74 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,9 @@ module github.com/prebid/prebid-cache -go 1.16 +go 1.19 require ( - github.com/aerospike/aerospike-client-go v4.0.0+incompatible - github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/aerospike/aerospike-client-go/v6 v6.7.0 github.com/didip/tollbooth/v6 v6.1.2 github.com/go-redis/redis/v8 v8.11.5 github.com/gocql/gocql v1.0.0 @@ -12,13 +11,52 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gomemcache v0.0.0-20210709172713-c1c93e4523ee github.com/julienschmidt/httprouter v1.3.0 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/rs/cors v1.8.2 - github.com/sirupsen/logrus v1.4.2 + github.com/rs/cors v1.11.0 + github.com/sirupsen/logrus v1.6.0 github.com/spf13/viper v1.11.0 github.com/stretchr/testify v1.7.1 github.com/vrischmann/go-metrics-influxdb v0.1.1 - github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-pkgz/expirable-cache v0.0.3 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + 
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect + golang.org/x/text v0.3.8 // indirect + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect ) diff --git a/go.sum b/go.sum index 97cc38e1..b241657e 100644 --- a/go.sum +++ b/go.sum @@ -17,30 +17,14 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod 
h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -54,52 +38,33 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/aerospike/aerospike-client-go v4.0.0+incompatible h1:YjjDU42LQBGozElzE87UvdOsIC8y6i8ntbqeCkHBanY= -github.com/aerospike/aerospike-client-go v4.0.0+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/aerospike/aerospike-client-go/v6 v6.7.0 h1:La2669CfR3VgwGtgqeIB1U6EUxQOWyFoyQPM/WTM8ws= +github.com/aerospike/aerospike-client-go/v6 v6.7.0/go.mod h1:Do5/flmgSo2X32YLGAYd6o5e/U2gOSpgEhrIGyOS3UI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 
h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -112,25 +77,20 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6dksJLXQDc= github.com/go-pkgz/expirable-cache v0.0.3/go.mod h1:+IauqN00R2FqNRLCLA+X5YljQJrwB179PfiAoMPlTlQ= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= @@ -139,16 +99,13 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c= github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -156,8 +113,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -173,7 +128,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -189,19 +143,15 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gomemcache v0.0.0-20210709172713-c1c93e4523ee h1:9B0tP0aMQlIhLAhEfwGR41hnnsB294Wt6GHxjidtEm8= github.com/google/gomemcache v0.0.0-20210709172713-c1c93e4523ee/go.mod h1:omwuVXMR08DGQo+8KNjYAlfsoTL7O9OBJbYUlawWcyQ= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -212,60 +162,25 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= 
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod 
h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -273,40 +188,25 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/k0kubun/pp/v3 v3.1.0/go.mod h1:vIrP5CF0n78pKHm2Ku6GVerpZBJvscg48WepUYEk2gw= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod 
h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -315,6 +215,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -322,15 +223,10 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= @@ -342,39 +238,40 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod 
h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= +github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= @@ -392,13 +289,11 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vrischmann/go-metrics-influxdb v0.1.1 h1:xneKFRjsS4BiVYvAKaM/rOlXYd1pGHksnES0ECCJLgo= github.com/vrischmann/go-metrics-influxdb v0.1.1/go.mod h1:q7YC8bFETCYopXRMtUvQQdLaoVhpsEwvQS2zZEYCqg8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -406,32 +301,23 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de 
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= +github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.17.0/go.mod 
h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -455,7 +341,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -482,7 +367,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -500,22 +384,14 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net 
v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -525,17 +401,7 @@ golang.org/x/oauth2 
v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -546,15 +412,15 @@ golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -564,19 
+430,14 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -587,6 +448,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -595,48 +458,30 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -655,7 +500,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -680,7 +524,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools 
v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -690,20 +533,14 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -723,23 +560,6 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod 
h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -770,7 +590,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -783,42 +602,7 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -832,22 +616,9 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -860,9 +631,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -878,16 +650,14 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/metrics/metricstest/metrics_mock.go b/metrics/metricstest/metrics_mock.go index 727dec6c..6fbd8b4e 100644 --- 
a/metrics/metricstest/metrics_mock.go +++ b/metrics/metricstest/metrics_mock.go @@ -1,96 +1,48 @@ package metricstest import ( + "reflect" "testing" "time" "github.com/prebid/prebid-cache/config" + "github.com/prebid/prebid-cache/metrics" "github.com/stretchr/testify/mock" ) func AssertMetrics(t *testing.T, expectedMetrics []string, actualMetrics MockMetrics) { t.Helper() - // All the names of our metric interface methods - allMetrics := map[string]struct{}{ - "RecordAcceptConnectionErrors": {}, - "RecordCloseConnectionErrors": {}, - "RecordConnectionClosed": {}, - "RecordConnectionOpen": {}, - "RecordGetBackendDuration": {}, - "RecordGetBackendError": {}, - "RecordGetBackendTotal": {}, - "RecordGetBadRequest": {}, - "RecordGetDuration": {}, - "RecordGetError": {}, - "RecordGetTotal": {}, - "RecordKeyNotFoundError": {}, - "RecordMissingKeyError": {}, - "RecordPutBackendDuration": {}, - "RecordPutBackendError": {}, - "RecordPutBackendInvalid": {}, - "RecordPutBackendJson": {}, - "RecordPutBackendSize": {}, - "RecordPutBackendTTLSeconds": {}, - "RecordPutBackendXml": {}, - "RecordPutBadRequest": {}, - "RecordPutDuration": {}, - "RecordPutError": {}, - "RecordPutKeyProvided": {}, - "RecordPutTotal": {}, + m := metrics.Metrics{} + mt := reflect.TypeOf(m) + allMetricsNames := make(map[string]struct{}, mt.NumMethod()) + metricsLogged := make(map[string]struct{}, mt.NumMethod()) + + // List methods of the Metrics interface into map + for i := 0; i < mt.NumMethod(); i++ { + allMetricsNames[mt.Method(i).Name] = struct{}{} } // Assert the metrics found in the expectedMetrics array where called. If a given element is not a known metric, throw error. 
for _, metricName := range expectedMetrics { - _, exists := allMetrics[metricName] + _, exists := allMetricsNames[metricName] if exists { actualMetrics.AssertCalled(t, metricName) - delete(allMetrics, metricName) + metricsLogged[metricName] = struct{}{} } else { t.Errorf("Cannot assert unrecognized metric '%s' was called", metricName) } } // Assert the metrics not found in the expectedMetrics array where not called - for metricName := range allMetrics { - actualMetrics.AssertNotCalled(t, metricName) + for metric := range allMetricsNames { + // Assert that metrics not found in metricsLogged were effectively not logged + if _, metricWasLogged := metricsLogged[metric]; !metricWasLogged { + actualMetrics.AssertNotCalled(t, metric) + } } } -// MetricsRecorded is a structure used to document the exepected metrics to be recorded when running unit tests -type MetricsRecorded struct { - // Connection metrics - RecordAcceptConnectionErrors int64 `json:"RecordAcceptConnectionErrors"` - RecordCloseConnectionErrors int64 `json:"RecordCloseConnectionErrors"` - RecordConnectionClosed int64 `json:"RecordConnectionClosed"` - RecordConnectionOpen int64 `json:"RecordConnectionOpen"` - - // Get metrics - RecordGetBackendDuration float64 `json:"RecordGetBackendDuration"` - RecordGetBackendError int64 `json:"RecordGetBackendError"` - RecordGetBackendTotal int64 `json:"RecordGetBackendTotal"` - RecordGetBadRequest int64 `json:"RecordGetBadRequest"` - RecordGetDuration float64 `json:"RecordGetDuration"` - RecordGetError int64 `json:"RecordGetError"` - RecordGetTotal int64 `json:"RecordGetTotal"` - - // Put metrics - RecordKeyNotFoundError int64 `json:"RecordKeyNotFoundError"` - RecordMissingKeyError int64 `json:"RecordMissingKeyError"` - RecordPutBackendDuration float64 `json:"RecordPutBackendDuration"` - RecordPutBackendError int64 `json:"RecordPutBackendError"` - RecordPutBackendInvalid int64 `json:"RecordPutBackendInvalid"` - RecordPutBackendJson int64 `json:"RecordPutBackendJson"` - 
RecordPutBackendSize float64 `json:"RecordPutBackendSize"` - RecordPutBackendTTLSeconds float64 `json:"RecordPutBackendTTLSeconds"` - RecordPutBackendXml int64 `json:"RecordPutBackendXml"` - RecordPutBadRequest int64 `json:"RecordPutBadRequest"` - RecordPutDuration float64 `json:"RecordPutDuration"` - RecordPutError int64 `json:"RecordPutError"` - RecordPutKeyProvided int64 `json:"RecordPutKeyProvided"` - RecordPutTotal int64 `json:"RecordPutTotal"` -} - func CreateMockMetrics() MockMetrics { mockMetrics := MockMetrics{} diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index b9c466b4..10a8624b 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -161,7 +161,14 @@ func CreatePrometheusMetrics(cfg config.PrometheusMetrics) *PrometheusMetrics { // Should be the equivalent of the following influx collectors // go metrics.CaptureRuntimeMemStats(m.Registry, flushTime) // go metrics.CaptureDebugGCStats(m.Registry, flushTime) - collectorNamespace := fmt.Sprintf("%s_%s", cfg.Namespace, cfg.Subsystem) + collectorNamespace := "" + if len(cfg.Namespace) > 0 { + collectorNamespace += fmt.Sprintf("%s_", cfg.Namespace) + } + if len(cfg.Subsystem) > 0 { + collectorNamespace += fmt.Sprintf("%s", cfg.Subsystem) + } + promMetrics.Registry.MustRegister( prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{Namespace: collectorNamespace}), ) diff --git a/server/server.go b/server/server.go index 4f75d5a7..6f103e2c 100644 --- a/server/server.go +++ b/server/server.go @@ -74,20 +74,37 @@ func Listen(cfg config.Configuration, publicHandler http.Handler, adminHandler h return } +// newAdminServer returns an http.Server with the AdminPort and RequestLimits.MaxHeaderBytes +// from Prebid Cache's config files or environment variables. If RequestLimits.MaxHeaderBytes +// is zero or was not specified, the http library's DefaultMaxHeaderBytes value of 1 MB +// is set instead. 
func newAdminServer(cfg config.Configuration, handler http.Handler) *http.Server { - return &http.Server{ + server := &http.Server{ Addr: ":" + strconv.Itoa(cfg.AdminPort), Handler: handler, } + if cfg.RequestLimits.MaxHeaderSize > 0 { + server.MaxHeaderBytes = cfg.RequestLimits.MaxHeaderSize + } + return server } +// newMainServer returns an http.Server with the configured Port and +// RequestLimits.MaxHeaderBytes values specified in Prebid Cache's config files +// or environment variables. If RequestLimits.MaxHeaderBytes is zero or was not +// specified, 1 MB, which is the value of the http library's DefaultMaxHeaderBytes, +// is set instead. func newMainServer(cfg config.Configuration, handler http.Handler) *http.Server { - return &http.Server{ + server := &http.Server{ Addr: ":" + strconv.Itoa(cfg.Port), Handler: handler, ReadTimeout: 15 * time.Second, WriteTimeout: 15 * time.Second, } + if cfg.RequestLimits.MaxHeaderSize > 0 { + server.MaxHeaderBytes = cfg.RequestLimits.MaxHeaderSize + } + return server } func runServer(server *http.Server, name string, listener net.Listener) { diff --git a/utils/errors.go b/utils/errors.go index f7cee1cf..ce1b5189 100644 --- a/utils/errors.go +++ b/utils/errors.go @@ -18,6 +18,7 @@ const ( KEY_NOT_FOUND // GET http.StatusNotFound 404 KEY_LENGTH // GET http.StatusNotFound 404 UNKNOWN_STORED_DATA_TYPE // GET http.StatusInternalServerError 500 + GET_INTERNAL_SERVER // GET http.StatusInternalServerError 500 PUT_INTERNAL_SERVER // PUT http.StatusInternalServerError 500 MARSHAL_RESPONSE // PUT http.StatusInternalServerError 500 PUT_DEADLINE_EXCEEDED // PUT HttpDependencyTimeout 597 @@ -38,6 +39,7 @@ var errToStatusCodes map[int]int = map[int]int{ MISSING_VALUE: http.StatusBadRequest, BAD_PAYLOAD_SIZE: http.StatusBadRequest, UNKNOWN_STORED_DATA_TYPE: http.StatusInternalServerError, + GET_INTERNAL_SERVER: http.StatusInternalServerError, PUT_INTERNAL_SERVER: http.StatusInternalServerError, MARSHAL_RESPONSE: 
http.StatusInternalServerError, KEY_NOT_FOUND: http.StatusNotFound, diff --git a/utils/utils.go b/utils/utils.go index 55b51a94..f9da4d0d 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1,6 +1,8 @@ package utils import ( + "math/rand" + "github.com/gofrs/uuid" ) @@ -9,3 +11,13 @@ func GenerateRandomID() (string, error) { u2, err := uuid.NewV4() return u2.String(), err } + +func RandomPick(pickProbability float64) bool { + if pickProbability == 0.0 { + return false + } + if pickProbability == 1.0 { + return true + } + return rand.Float64() < pickProbability +} diff --git a/utils/utils_test.go b/utils/utils_test.go new file mode 100644 index 00000000..1d8d0a50 --- /dev/null +++ b/utils/utils_test.go @@ -0,0 +1,31 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRandomPick(t *testing.T) { + testCases := []struct { + name string + inPickProbability float64 + expected bool + }{ + { + name: "zero", // Zero probablity of true, expect false + inPickProbability: 0.00, + expected: false, + }, + { + name: "one", // 100% probability of true, expect true + inPickProbability: 1.00, + expected: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, RandomPick(tc.inPickProbability)) + }) + } +} diff --git a/version/version.go b/version/version.go index c7b7bfc4..8103cdb1 100644 --- a/version/version.go +++ b/version/version.go @@ -2,12 +2,16 @@ package version // Ver holds the version derived from the latest git tag // Populated using: -// go build -ldflags "-X github.com/prebid/prebid-cache/version.Ver=`git describe --tags`" +// +// go build -ldflags "-X github.com/prebid/prebid-cache/version.Ver=`git describe --tags`" +// // Populated automatically at build / releases in the Docker image var Ver string // Rev holds binary revision string // Populated using: -// go build -ldflags "-X github.com/prebid/prebid-cache/version.Rev=`git rev-parse HEAD`" +// +// go build 
-ldflags "-X github.com/prebid/prebid-cache/version.Rev=`git rev-parse HEAD`" +// // Populated automatically at build / releases in the Docker image var Rev string