From e291bc6fc8d0f8615b0eff49947f82a1b71e2f64 Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Fri, 20 Mar 2026 16:18:38 -0300 Subject: [PATCH 01/10] fix: update GPV2 block number --- src/data.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data.ts b/src/data.ts index 8aed501..dea450e 100644 --- a/src/data.ts +++ b/src/data.ts @@ -46,7 +46,7 @@ export const CoWShedFactoryContract = { export const GPV2_SETTLEMENT_DEPLOYMENTS = { mainnet: { address: "0x9008D19f58AAbD9eD0D60971565AA8510560ab41" as const, - startBlock: 17883049, + startBlock: 23812751, // AaveV3AdapterFactory deployment block (Nov 16, 2025) }, } as const; From 4be95efa429385e5ff977d1a64902f1d99a2748c Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Mon, 23 Mar 2026 16:02:03 -0300 Subject: [PATCH 02/10] feat: replace Trade handler with Settlement filter for FlashLoanRouter --- ponder.config.ts | 9 +- src/application/handlers/settlement.ts | 241 ++++++++++++++++++------- src/data.ts | 23 ++- 3 files changed, 204 insertions(+), 69 deletions(-) diff --git a/ponder.config.ts b/ponder.config.ts index cebaba8..9c63bac 100644 --- a/ponder.config.ts +++ b/ponder.config.ts @@ -3,6 +3,7 @@ import { ComposableCowContract, COMPOSABLE_COW_DEPLOYMENTS, CoWShedFactoryContract, + FLASH_LOAN_ROUTER_ADDRESSES, GPv2SettlementContract, } from "./src/data"; @@ -16,7 +17,13 @@ export default createConfig({ contracts: { ComposableCow: ComposableCowContract, CoWShedFactory: CoWShedFactoryContract, - GPv2Settlement: GPv2SettlementContract, + GPv2Settlement: { + ...GPv2SettlementContract, + filter: { + event: "Settlement", + args: { solver: FLASH_LOAN_ROUTER_ADDRESSES.mainnet }, + }, + }, }, blocks: { RemovalPoller: { diff --git a/src/application/handlers/settlement.ts b/src/application/handlers/settlement.ts index 8116fae..f1fda8d 100644 --- a/src/application/handlers/settlement.ts +++ b/src/application/handlers/settlement.ts @@ -1,78 +1,193 @@ import { ponder } from "ponder:registry"; import 
{ AddressType, ownerMapping, transaction } from "ponder:schema"; import { and, eq } from "ponder"; +import { keccak256, toBytes } from "viem"; import { AaveV3AdapterHelperAbi } from "../../../abis/AaveV3AdapterHelperAbi"; -import { AAVE_V3_ADAPTER_FACTORY_ADDRESS } from "../../data"; +import { + AAVE_V3_ADAPTER_FACTORY_ADDRESSES, + GPV2_SETTLEMENT_DEPLOYMENTS, +} from "../../data"; + +// Trade(address,address,address,uint256,uint256,uint256,bytes) — topic0 hash +const TRADE_TOPIC = keccak256( + toBytes("Trade(address,address,address,uint256,uint256,uint256,bytes)"), +); + +// ── Stats / timing ──────────────────────────────────────────────────────────── +// Logged every LOG_INTERVAL_MS to measure per-step cost without flooding logs. +const stats = { + total: 0, // Settlement events processed + tradeLogsFound: 0, // Trade logs found in receipts + skippedAlreadyMapped: 0, + skippedEOA: 0, + skippedNotAdapter: 0, + mapped: 0, + msFactory: 0, +}; +let statsLastLogAt = Date.now(); +const LOG_INTERVAL_MS = 30_000; + +function maybeLogStats() { + if (Date.now() - statsLastLogAt < LOG_INTERVAL_MS) return; + const contractAddresses = + stats.tradeLogsFound - stats.skippedAlreadyMapped - stats.skippedEOA; + console.log( + `[SETTLEMENT:STATS] settlements=${stats.total}` + + ` tradeLogs=${stats.tradeLogsFound}` + + ` alreadyMapped=${stats.skippedAlreadyMapped}` + + ` eoa=${stats.skippedEOA}` + + ` notAdapter=${stats.skippedNotAdapter}` + + ` mapped=${stats.mapped}` + + ` | avgFactory=${contractAddresses > 0 ? (stats.msFactory / contractAddresses).toFixed(1) : 0}ms`, + ); + statsLastLogAt = Date.now(); +} + +// FACTORY() selector — keccak256("FACTORY()")[0:4], confirmed from RPC logs. +// Using raw eth_call instead of readContract to avoid Ponder's WARN on revert, +// which floods the log since non-adapter contracts do not implement FACTORY(). 
+const FACTORY_SELECTOR = "0x2dd31000" as const; + +ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { + // Kill switch: set DISABLE_SETTLEMENT_FACTORY_CHECK=true to skip all RPC + // calls in this handler. Use to benchmark base throughput vs. factory cost. + if (process.env.DISABLE_SETTLEMENT_FACTORY_CHECK === "true") return; -ponder.on("GPv2Settlement:Trade", async ({ event, context }) => { - const { owner } = event.args; - const ownerAddress = owner.toLowerCase() as `0x${string}`; const chainId = context.chain.id; + const chainName = context.chain.name; + + // Resolve chain-specific addresses — skip safely if chain is not configured + const settlementDeployment = + GPV2_SETTLEMENT_DEPLOYMENTS[ + chainName as keyof typeof GPV2_SETTLEMENT_DEPLOYMENTS + ]; + if (!settlementDeployment) return; + const settlementAddress = settlementDeployment.address.toLowerCase(); + + const adapterFactoryAddress = + AAVE_V3_ADAPTER_FACTORY_ADDRESSES[ + chainName as keyof typeof AAVE_V3_ADAPTER_FACTORY_ADDRESSES + ]?.toLowerCase(); + if (!adapterFactoryAddress) return; + + stats.total++; + + // Fetch the full receipt to access all logs in the transaction. + // Volume is negligible (FlashLoanRouter settlements only), so the extra RPC + // call per settlement is acceptable and much cheaper than the old per-trade approach. 
+ const receipt = await context.client.getTransactionReceipt({ + hash: event.transaction.hash, + }); + + for (const log of receipt.logs) { + // Only Trade logs emitted by GPv2Settlement in this same transaction + if (log.address.toLowerCase() !== settlementAddress) continue; + if (log.topics[0] !== TRADE_TOPIC) continue; + + stats.tradeLogsFound++; - // Skip if already mapped (adapter seen in a prior trade) - const existing = await context.db.sql - .select() - .from(ownerMapping) - .where( - and( - eq(ownerMapping.chainId, chainId), - eq(ownerMapping.address, ownerAddress), - ), - ) - .limit(1); - - if (existing.length > 0) return; - - // Skip if EOA (no bytecode) - const code = await context.client.getCode({ address: owner }); - if (!code || code === "0x") return; - - // Check for Aave adapter via FACTORY() — silently skip if call reverts - let factoryAddress: `0x${string}`; - try { - factoryAddress = await context.client.readContract({ + // Decode owner from topics[1] — ABI-encoded 32-byte padded address + const owner = `0x${log.topics[1]!.slice(26)}` as `0x${string}`; + const ownerAddress = owner.toLowerCase() as `0x${string}`; + + // Skip if already mapped (adapter seen in a prior settlement) + const existing = await context.db.sql + .select() + .from(ownerMapping) + .where( + and( + eq(ownerMapping.chainId, chainId), + eq(ownerMapping.address, ownerAddress), + ), + ) + .limit(1); + + if (existing.length > 0) { + stats.skippedAlreadyMapped++; + maybeLogStats(); + continue; + } + + // Skip if EOA (no bytecode) + const code = await context.client.getCode({ address: owner }); + if (!code || code === "0x") { + stats.skippedEOA++; + maybeLogStats(); + continue; + } + + // Check for Aave adapter via raw eth_call. + // readContract() is intentionally avoided here: Ponder logs a WARN for every + // revert, and FACTORY() reverts on any non-adapter contract. 
+ const t1 = Date.now(); + let factoryData: `0x${string}` | undefined; + try { + const result = await context.client.call({ + to: owner, + data: FACTORY_SELECTOR, + }); + factoryData = result.data; + } catch { + stats.msFactory += Date.now() - t1; + stats.skippedNotAdapter++; + maybeLogStats(); + continue; + } + stats.msFactory += Date.now() - t1; + + // ABI-encoded address = 32 bytes = 66 hex chars (including 0x prefix) + if (!factoryData || factoryData.length < 66) { + stats.skippedNotAdapter++; + maybeLogStats(); + continue; + } + + // Decode padded address: 0x + 24 zero-padding hex chars + 40 address hex chars + const factoryAddress = `0x${factoryData.slice(26)}` as `0x${string}`; + + if (factoryAddress.toLowerCase() !== adapterFactoryAddress) { + stats.skippedNotAdapter++; + maybeLogStats(); + continue; + } + + // Resolve EOA via owner() — this call should always succeed at this point + const eoaOwner = await context.client.readContract({ address: owner, abi: AaveV3AdapterHelperAbi, - functionName: "FACTORY", + functionName: "owner", }); - } catch { - // Not an Aave adapter (Safe, other ERC-1271 signer, etc.) 
- return; - } - if (factoryAddress.toLowerCase() !== AAVE_V3_ADAPTER_FACTORY_ADDRESS) return; + await context.db + .insert(transaction) + .values({ + hash: event.transaction.hash, + chainId, + blockNumber: event.block.number, + blockTimestamp: event.block.timestamp, + }) + .onConflictDoNothing(); - // Resolve EOA via owner() - const eoaOwner = await context.client.readContract({ - address: owner, - abi: AaveV3AdapterHelperAbi, - functionName: "owner", - }); + await context.db + .insert(ownerMapping) + .values({ + chainId, + address: ownerAddress, + owner: eoaOwner.toLowerCase() as `0x${string}`, + addressType: AddressType.FlashLoanHelper, + txHash: event.transaction.hash, + blockNumber: event.block.number, + resolutionDepth: 1, + }) + .onConflictDoNothing(); - await context.db - .insert(transaction) - .values({ - hash: event.transaction.hash, - chainId, - blockNumber: event.block.number, - blockTimestamp: event.block.timestamp, - }) - .onConflictDoNothing(); - - await context.db - .insert(ownerMapping) - .values({ - chainId, - address: ownerAddress, - owner: eoaOwner.toLowerCase() as `0x${string}`, - addressType: AddressType.FlashLoanHelper, - txHash: event.transaction.hash, - blockNumber: event.block.number, - resolutionDepth: 1, - }) - .onConflictDoNothing(); + stats.mapped++; + maybeLogStats(); - console.log( - `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED adapter=${ownerAddress} eoa=${eoaOwner.toLowerCase()} block=${event.block.number} chain=${chainId}`, - ); + console.log( + `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED adapter=${ownerAddress} eoa=${eoaOwner.toLowerCase()} block=${event.block.number} chain=${chainId}`, + ); + } + + maybeLogStats(); }); diff --git a/src/data.ts b/src/data.ts index dea450e..1a13ca6 100644 --- a/src/data.ts +++ b/src/data.ts @@ -40,8 +40,8 @@ export const CoWShedFactoryContract = { /** * GPv2Settlement — mainnet only. 
- * Start block 17883049 (ComposableCoW genesis), not 12593265 (Settlement genesis), - * to avoid syncing 2+ years of unrelated trades. + * + * Start block = AaveV3AdapterFactory deployment block, NOT ComposableCoW genesis. */ export const GPV2_SETTLEMENT_DEPLOYMENTS = { mainnet: { @@ -60,8 +60,21 @@ export const GPv2SettlementContract = { /** * AaveV3AdapterFactory — deploys per-user flash loan adapter proxies. * Detection: call FACTORY() on a contract; if it returns this address, it is an Aave adapter. - * Same address across all chains (CREATE2 deterministic deployment). * Not a Ponder-indexed contract — used for view calls only. */ -export const AAVE_V3_ADAPTER_FACTORY_ADDRESS = - "0xdeCc46a4b09162f5369c5c80383aaa9159bcf192" as const; +export const AAVE_V3_ADAPTER_FACTORY_ADDRESSES = { + mainnet: "0xdeCc46a4b09162f5369c5c80383aaa9159bcf192" as const, + // gnosis: "0x...", // TODO: verify + // arbitrum: "0x...", // TODO: verify +} as const; + +/** + * FlashLoanRouter — the CoW Protocol solver that submits all Aave flash loan settlements. + * Confirmed via ROUTER() on AaveV3AdapterFactory (immutable variable, cannot change). + * Used to filter GPv2Settlement:Settlement events to only those involving flash loans. 
+ */ +export const FLASH_LOAN_ROUTER_ADDRESSES = { + mainnet: "0x9da8B48441583a2b93e2eF8213aAD0EC0b392C69" as const, + // gnosis: "0x...", // TODO: confirm via ROUTER() on gnosis AaveV3AdapterFactory + // arbitrum: "0x...", // TODO: confirm via ROUTER() on arbitrum AaveV3AdapterFactory +} as const; From 9f702524a6e42c444f9cf0e7fa0017b53007ecc8 Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 09:43:35 -0300 Subject: [PATCH 03/10] feat: add production deployment setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds Docker + Docker Swarm deployment infrastructure: - Dockerfile (multi-stage Node/pnpm, 24h healthcheck on /ready) - .dockerignore (excludes node_modules, tmp, logs, secrets) - deployment/docker-compose.yml (postgres on port 5433) - deployment/docker-stack.yml (ponder swarm service on port 40000) - deployment/manage.sh (build → network → postgres → stack deploy) - deployment/deploy-remotely.sh (rsync + scp + ssh orchestration) - deployment/static/start-db.sh (postgres memory auto-tuner) - .github/workflows/deploy.yml (auto-deploy on push to main) Schema per deploy: programmatic_orders_, injected by manage.sh. Requires 5 GitHub secrets: DEPLOY_SSH_PRIVATE_KEY, DEPLOY_SERVER_HOST, DEPLOY_SERVER_USER, DEPLOY_PATH, DEPLOY_ENV_FILE_CONTENT. 
--- .dockerignore | 10 +++ .env.example | 22 ++++++ .github/workflows/deploy.yml | 52 ++++++++++++++ .gitignore | 2 + Dockerfile | 38 ++++++++++ deployment/deploy-remotely.sh | 53 ++++++++++++++ deployment/docker-compose.yml | 37 ++++++++++ deployment/docker-stack.yml | 44 ++++++++++++ deployment/manage.sh | 127 ++++++++++++++++++++++++++++++++++ deployment/static/start-db.sh | 36 ++++++++++ package.json | 2 +- 11 files changed, 422 insertions(+), 1 deletion(-) create mode 100644 .dockerignore create mode 100644 .github/workflows/deploy.yml create mode 100644 Dockerfile create mode 100755 deployment/deploy-remotely.sh create mode 100644 deployment/docker-compose.yml create mode 100644 deployment/docker-stack.yml create mode 100755 deployment/manage.sh create mode 100755 deployment/static/start-db.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..b5f9de5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,10 @@ +.git +node_modules +.env +.env.local +.vite +tmp +*.log +ponder.log +deployment/.env +generated diff --git a/.env.example b/.env.example index 0c1e315..755e53c 100644 --- a/.env.example +++ b/.env.example @@ -14,6 +14,28 @@ MAINNET_RPC_URL=https://eth.api.pocket.network # Database (optional — defaults to Ponder's built-in SQLite for dev) # Set this to use PostgreSQL from docker-compose.yml DATABASE_URL=postgresql://postgres:postgres@localhost:5432/programmatic-orders +# Schema for this app (required when using Postgres; avoids "previously used by a different Ponder app") +DATABASE_SCHEMA=programmatic_orders + +# Dev/local: reduce RPC usage during sync (set to 1 to disable) +# DISABLE_REMOVAL_POLL=true — skip multicall singleOrders (RemovalPoller) every N blocks +# DISABLE_SETTLEMENT_FACTORY_CHECK=true — skip getCode + FACTORY() calls in the GPv2Settlement:Trade +# handler entirely. Use to benchmark base sync throughput vs. the cost of those RPC calls. 
+ # Logging (optional) # PINO_LOG_LEVEL=info + +# ============================================================ +# Production deployment (deployment/.env on remote machine) +# ============================================================ +PROJECT_PREFIX=cow-programmatic +POSTGRES_USER=cow_programmatic +POSTGRES_PASSWORD= +POSTGRES_DB=cow_programmatic +POSTGRES_PORT=5433 +POSTGRES_MEMORY_LIMIT=1G +PONDER_EXPOSED_PORT=40000 +PONDER_MEMORY_LIMIT=2G +# DATABASE_SCHEMA is injected automatically by manage.sh as: +# programmatic_orders_ — do not set here diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..93f2f37 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,52 @@ +name: Deploy to Production + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + deploy: + name: Deploy to Production + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up SSH key + run: | + mkdir -p ~/.ssh + echo "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + + - name: Set up SSH config + run: | + cat <<EOF > ~/.ssh/config + Host cow-deploy + HostName ${{ secrets.DEPLOY_SERVER_HOST }} + User ${{ secrets.DEPLOY_SERVER_USER }} + Port 22 + IdentityFile ~/.ssh/id_ed25519 + StrictHostKeyChecking no + ServerAliveInterval 30 + ServerAliveCountMax 10 + TCPKeepAlive yes + EOF + chmod 600 ~/.ssh/config + + - name: Create .env file + run: | + echo "${{ secrets.DEPLOY_ENV_FILE_CONTENT }}" > .env + + - name: Run deploy script + run: | + cd deployment + bash deploy-remotely.sh \ + cow-deploy:${{ secrets.DEPLOY_PATH }} \ + ../.env diff --git a/.gitignore b/.gitignore index a40e084..633acb3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,9 @@ yarn-error.log* .DS_Store # Env files +.env .env.local +deployment/.env # Ponder /generated/ 
diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7fd307b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,38 @@ +FROM node:22-alpine AS base + +ENV PNPM_HOME="/pnpm" +ENV PATH="$PNPM_HOME:$PATH" +RUN npm install -g pnpm@10 + +WORKDIR /usr/src/app + +# ---- build stage ---- +FROM base AS build + +COPY package.json pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile + +COPY . . + +# ---- production image ---- +FROM base + +RUN apk add --no-cache curl + +ENV NODE_ENV=production + +COPY --from=build /usr/src/app ./ +RUN pnpm install --frozen-lockfile + +HEALTHCHECK \ + --start-period=24h \ + --start-interval=1s \ + --retries=3 \ + CMD curl -f http://localhost:3000/ready || exit 1 + +EXPOSE 3000/tcp + +CMD ["pnpm", "start"] + +ARG PIPELINE_BUILD_TAG="unknown" +ENV APP_REVISION=$PIPELINE_BUILD_TAG diff --git a/deployment/deploy-remotely.sh b/deployment/deploy-remotely.sh new file mode 100755 index 0000000..e06d0f7 --- /dev/null +++ b/deployment/deploy-remotely.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -exo pipefail + +REPO_ROOT_DIR=$(git rev-parse --show-toplevel) +APP_REVISION=$(git rev-parse --short HEAD) + +DEPLOY_TARGET="${1:-}" +ENV_FILE_PATH="${2:-.env}" + +if [[ -z "$DEPLOY_TARGET" ]]; then + echo "Usage: $0 <deploy_target> [env_file_path]" + exit 1 +fi + +if [[ "$DEPLOY_TARGET" == "-" ]]; then + # Local deployment + TARGET_DEPLOY_DIR="$REPO_ROOT_DIR" + APP_DEPLOY_DIR="$TARGET_DEPLOY_DIR/deployment" + + bash "$APP_DEPLOY_DIR/manage.sh" ${MANAGE_CMD_OVERRIDE:-up} \ + --env-file "$ENV_FILE_PATH" \ + --revision "$APP_REVISION" +elif [[ "$DEPLOY_TARGET" =~ ^[^:]+:.+ ]]; then + # Remote deployment via SSH + SSH_HOST=$(echo "$DEPLOY_TARGET" | cut -d':' -f1) + REMOTE_PATH=$(echo "$DEPLOY_TARGET" | cut -d':' -f2-) + + # Sync repository to remote + # .env is excluded — copied separately via scp to preserve server secrets + rsync -avz --delete \ + --mkpath \ + --exclude='.git' \ + --exclude='node_modules' \ + --exclude='.env' \ + --exclude='.env.local' \ + 
--exclude='.vite' \ + --exclude='*.log' \ + --exclude='tmp/' \ + "$REPO_ROOT_DIR/" "$SSH_HOST:$REMOTE_PATH/" + + # Copy .env to deployment directory on remote (separate from rsync) + REMOTE_ENV_PATH="$REMOTE_PATH/deployment/.env" + scp "$ENV_FILE_PATH" "$SSH_HOST:$REMOTE_ENV_PATH" + + APP_DEPLOY_DIR="$REMOTE_PATH/deployment" + MANAGE_CMD="${MANAGE_CMD_OVERRIDE:-up}" + + # Run manage.sh on remote + ssh "$SSH_HOST" "cd $APP_DEPLOY_DIR && bash manage.sh $MANAGE_CMD --env-file .env --revision $APP_REVISION" +else + echo "Error: must be '-' or SSH_HOST:PATH" + exit 1 +fi diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml new file mode 100644 index 0000000..4641519 --- /dev/null +++ b/deployment/docker-compose.yml @@ -0,0 +1,37 @@ +services: + postgres: + image: postgres:16 + restart: unless-stopped + command: ["bash", "/start-db.sh"] + environment: + POSTGRES_DB: ${POSTGRES_DB:?error} + POSTGRES_USER: ${POSTGRES_USER:?error} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?error} + POSTGRES_MEMORY_LIMIT: ${POSTGRES_MEMORY_LIMIT:-1G} + POSTGRES_INITDB_ARGS: "--encoding=UTF8 --locale=en_US.UTF-8" + shm_size: 256m + deploy: + resources: + limits: + cpus: "2" + memory: ${POSTGRES_MEMORY_LIMIT:-1G} + ports: + - "${POSTGRES_PORT:-5433}:5432" + volumes: + - ./static/start-db.sh:/start-db.sh:ro + - postgres-data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + interval: 10s + timeout: 5s + retries: 5 + +networks: + default: + name: ${PROJECT_PREFIX:?error}-default + external: true + +volumes: + postgres-data: + name: ${PROJECT_PREFIX:?error}-postgres-data + driver: local diff --git a/deployment/docker-stack.yml b/deployment/docker-stack.yml new file mode 100644 index 0000000..30e3b86 --- /dev/null +++ b/deployment/docker-stack.yml @@ -0,0 +1,44 @@ +x-ponder-deploy: &ponder-deploy + replicas: 1 + resources: + limits: + cpus: "2" + memory: ${PONDER_MEMORY_LIMIT:-2G} + reservations: + cpus: "0.5" + 
memory: 512M + update_config: + order: start-first + failure_action: rollback + restart_policy: + condition: on-failure + delay: 10s + max_attempts: 5 + window: 120s + +services: + ponder: + image: ${PROJECT_PREFIX:?error}-ponder:${APP_REVISION:?error} + build: + context: .. + dockerfile: Dockerfile + args: + PIPELINE_BUILD_TAG: ${APP_REVISION} + environment: + DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:?error} + MAINNET_RPC_URL: ${MAINNET_RPC_URL:?error} + ports: + - "${PONDER_EXPOSED_PORT:-40000}:3000" + deploy: + <<: *ponder-deploy + logging: + driver: json-file + options: + max-size: "50m" + max-file: "5" + +networks: + default: + name: ${PROJECT_PREFIX:?error}-default + external: ${EXTERNAL_RESOURCES:-false} diff --git a/deployment/manage.sh b/deployment/manage.sh new file mode 100755 index 0000000..948824d --- /dev/null +++ b/deployment/manage.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<EOF +Usage: $0 <command> [options] + +Commands: + up Deploy the stack + down Tear down the stack + +Options: + -e, --env-file Path to .env file (required) + -r, --revision Application revision (required for 'up') + -h, --help Show this help message +EOF + exit 1 +} + +COMMAND="${1:-}" +shift || true + +ENV_FILE_PATH="" +APP_REVISION="" + +while [[ $# -gt 0 ]]; do + case "$1" in + -e|--env-file) ENV_FILE_PATH="$2"; shift 2 ;; + -r|--revision) APP_REVISION="$2"; shift 2 ;; + -h|--help) usage ;; + *) echo "Unknown option: $1"; usage ;; + esac +done + +if [[ -z "$COMMAND" ]]; then echo "Error: command required (up|down)"; usage; fi +if [[ -z "$ENV_FILE_PATH" ]]; then echo "Error: --env-file required"; usage; fi + +APP_DEPLOY_DIR="$(dirname "$(realpath "$0")")" +cd "$APP_DEPLOY_DIR" + +set -a +source "$ENV_FILE_PATH" +set +a + +if [[ -z "${PROJECT_PREFIX:-}" ]]; then + echo "Error: PROJECT_PREFIX must be set in the env file" + exit 1 +fi + +export PROJECT_PREFIX +export 
APP_REVISION="${APP_REVISION:-latest}" +# Inject schema name: programmatic_orders_ +export DATABASE_SCHEMA="programmatic_orders_${APP_REVISION}" + +cmd_up() { + if [[ -z "${APP_REVISION:-}" || "$APP_REVISION" == "latest" ]]; then + echo "Error: --revision is required for 'up'" + exit 1 + fi + + echo ">>> Building ponder image..." + EXTERNAL_RESOURCES=true docker compose \ + -p "${PROJECT_PREFIX}-stack" -f docker-stack.yml \ + build --no-cache --build-arg BUILDKIT_INLINE_CACHE=1 + + # Ensure swarm overlay network exists + NETWORK_NAME="${PROJECT_PREFIX}-default" + echo ">>> Ensuring swarm network: ${NETWORK_NAME}..." + + NETWORK_SCOPE=$(docker network inspect "$NETWORK_NAME" --format '{{.Scope}}' 2>/dev/null || echo "none") + + if [[ "$NETWORK_SCOPE" == "local" ]]; then + echo ">>> Migrating local network to overlay..." + CONTAINERS=$(docker network inspect "$NETWORK_NAME" --format '{{range .Containers}}{{.Name}} {{end}}' 2>/dev/null || echo "") + for container in $CONTAINERS; do + docker stop "$container" 2>/dev/null || true + docker rm "$container" 2>/dev/null || true + done + docker compose -p "${PROJECT_PREFIX}" -f docker-compose.yml down --remove-orphans 2>/dev/null || true + docker network rm "$NETWORK_NAME" 2>/dev/null && NETWORK_SCOPE="none" || true + fi + + if [[ "$NETWORK_SCOPE" == "none" ]]; then + docker network create --driver overlay --attachable "$NETWORK_NAME" + fi + + echo ">>> Starting postgres..." + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + up -d --remove-orphans + + echo ">>> Waiting for postgres to be healthy..." + sleep 5s + + echo ">>> Deploying stack (DATABASE_SCHEMA=${DATABASE_SCHEMA})..." + EXTERNAL_RESOURCES=true docker stack deploy \ + --compose-file docker-stack.yml \ + --prune --detach --with-registry-auth --resolve-image never \ + "${PROJECT_PREFIX}" + + echo ">>> Cleaning up old ponder images..." 
+ IMAGE_NAME="${PROJECT_PREFIX}-ponder" + OLD_IMAGES=$(docker images --format "{{.Repository}}:{{.Tag}}" "$IMAGE_NAME" | grep -v ":${APP_REVISION}$" || true) + if [[ -n "$OLD_IMAGES" ]]; then + echo "$OLD_IMAGES" | xargs -r docker rmi 2>/dev/null || true + fi + docker image prune -f 2>/dev/null || true + docker container prune -f 2>/dev/null || true + + echo ">>> Deploy complete. DATABASE_SCHEMA=${DATABASE_SCHEMA}" +} + +cmd_down() { + echo ">>> Removing stack..." + EXTERNAL_RESOURCES=true docker stack rm "${PROJECT_PREFIX}" || true + + echo ">>> Stopping postgres..." + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + down -v --remove-orphans || true +} + +case "$COMMAND" in + up) cmd_up ;; + down) cmd_down ;; + *) echo "Unknown command: $COMMAND"; usage ;; +esac diff --git a/deployment/static/start-db.sh b/deployment/static/start-db.sh new file mode 100755 index 0000000..3207dfc --- /dev/null +++ b/deployment/static/start-db.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +POSTGRES_MAX_CONNECTIONS="${POSTGRES_MAX_CONNECTIONS:-100}" + +if [ -n "${POSTGRES_MEMORY_LIMIT:-}" ]; then + LIMIT_BYTES=$(numfmt --from=iec "${POSTGRES_MEMORY_LIMIT}" 2>/dev/null) + if [ -z "$LIMIT_BYTES" ] || [ "$LIMIT_BYTES" = "0" ]; then + echo "Error: Invalid POSTGRES_MEMORY_LIMIT value: $POSTGRES_MEMORY_LIMIT" >&2 + exit 1 + fi + TOTAL_RAM_MB=$((LIMIT_BYTES / 1024 / 1024)) +else + TOTAL_RAM_KB=$(grep MemTotal /proc/meminfo | awk '{print $2}') + TOTAL_RAM_MB=$((TOTAL_RAM_KB / 1024)) +fi + +SHARED_BUFFERS_MB=$((TOTAL_RAM_MB * 20 / 100)) +MAINTENANCE_WORK_MEM_MB=$((TOTAL_RAM_MB * 5 / 100)) +EFFECTIVE_CACHE_SIZE_MB=$((TOTAL_RAM_MB / 2)) +WORK_MEM_MB=$(( (TOTAL_RAM_MB * 25 / 100) / POSTGRES_MAX_CONNECTIONS )) + +if [ "$WORK_MEM_MB" -lt 1 ]; then WORK_MEM_MB=1; fi +if [ "$SHARED_BUFFERS_MB" -lt 32 ]; then SHARED_BUFFERS_MB=32; fi +if [ "$MAINTENANCE_WORK_MEM_MB" -lt 16 ]; then MAINTENANCE_WORK_MEM_MB=16; fi + +set -x +exec docker-entrypoint.sh \ + -c 
"max_connections=${POSTGRES_MAX_CONNECTIONS}" \ + -c "shared_buffers=${SHARED_BUFFERS_MB}MB" \ + -c "work_mem=${WORK_MEM_MB}MB" \ + -c "maintenance_work_mem=${MAINTENANCE_WORK_MEM_MB}MB" \ + -c "effective_cache_size=${EFFECTIVE_CACHE_SIZE_MB}MB" \ + -c "max_wal_size=1GB" \ + -c "min_wal_size=256MB" \ + -c "checkpoint_completion_target=0.9" \ + -c "wal_buffers=8MB" diff --git a/package.json b/package.json index 9b2aa51..0b58b7e 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,7 @@ "type": "module", "scripts": { "dev": "ponder dev", - "start": "ponder start --schema ${DATABASE_SCHEMA:-public}", + "start": "ponder start -p 3000 --schema ${DATABASE_SCHEMA:-public}", "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint . --ext .ts", From 5ec97aaa590f5d02fe9fcd3fa038716c73442d8c Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 10:15:58 -0300 Subject: [PATCH 04/10] fix: rename function maybeLogStats to logStatsIfIntervalPassed --- src/application/handlers/settlement.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/application/handlers/settlement.ts b/src/application/handlers/settlement.ts index f1fda8d..767c180 100644 --- a/src/application/handlers/settlement.ts +++ b/src/application/handlers/settlement.ts @@ -27,7 +27,7 @@ const stats = { let statsLastLogAt = Date.now(); const LOG_INTERVAL_MS = 30_000; -function maybeLogStats() { +function logStatsIfIntervalPassed() { if (Date.now() - statsLastLogAt < LOG_INTERVAL_MS) return; const contractAddresses = stats.tradeLogsFound - stats.skippedAlreadyMapped - stats.skippedEOA; @@ -104,7 +104,7 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { if (existing.length > 0) { stats.skippedAlreadyMapped++; - maybeLogStats(); + logStatsIfIntervalPassed(); continue; } @@ -112,7 +112,7 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { const code = await context.client.getCode({ address: owner }); if (!code || 
code === "0x") { stats.skippedEOA++; - maybeLogStats(); + logStatsIfIntervalPassed(); continue; } @@ -130,7 +130,7 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { } catch { stats.msFactory += Date.now() - t1; stats.skippedNotAdapter++; - maybeLogStats(); + logStatsIfIntervalPassed(); continue; } stats.msFactory += Date.now() - t1; @@ -138,7 +138,7 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { // ABI-encoded address = 32 bytes = 66 hex chars (including 0x prefix) if (!factoryData || factoryData.length < 66) { stats.skippedNotAdapter++; - maybeLogStats(); + logStatsIfIntervalPassed(); continue; } @@ -147,7 +147,7 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { if (factoryAddress.toLowerCase() !== adapterFactoryAddress) { stats.skippedNotAdapter++; - maybeLogStats(); + logStatsIfIntervalPassed(); continue; } @@ -182,12 +182,12 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { .onConflictDoNothing(); stats.mapped++; - maybeLogStats(); + logStatsIfIntervalPassed(); console.log( `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED adapter=${ownerAddress} eoa=${eoaOwner.toLowerCase()} block=${event.block.number} chain=${chainId}`, ); } - maybeLogStats(); + logStatsIfIntervalPassed(); }); From cb31c59c815ad0b53cad30757b1facc8ce9a27e5 Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 10:39:45 -0300 Subject: [PATCH 05/10] fix: switch DATABASE_SCHEMA to a fixed schema name --- deployment/manage.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/deployment/manage.sh b/deployment/manage.sh index 948824d..093f4a5 100755 --- a/deployment/manage.sh +++ b/deployment/manage.sh @@ -49,8 +49,7 @@ fi export PROJECT_PREFIX export APP_REVISION="${APP_REVISION:-latest}" -# Inject schema name: programmatic_orders_ -export DATABASE_SCHEMA="programmatic_orders_${APP_REVISION}" +export DATABASE_SCHEMA="programmatic_orders" cmd_up() { if [[ -z 
"${APP_REVISION:-}" || "$APP_REVISION" == "latest" ]]; then @@ -107,7 +106,7 @@ cmd_up() { docker image prune -f 2>/dev/null || true docker container prune -f 2>/dev/null || true - echo ">>> Deploy complete. DATABASE_SCHEMA=${DATABASE_SCHEMA}" + echo ">>> Deploy complete." } cmd_down() { From c7f39581d6d1c41a05d9ad157f3830ec9eed3f56 Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 10:41:15 -0300 Subject: [PATCH 06/10] chore: add test branch on deploy --- .github/workflows/deploy.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 93f2f37..e444519 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -9,6 +9,7 @@ on: push: branches: - main + - jefferson/cow-751-fix-m2-deploy jobs: deploy: From b7007bde33ce20c8508bdac3c22ee18a12f0ca4b Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 14:35:55 -0300 Subject: [PATCH 07/10] fix: replace docker swarm stack with plain docker compose --- deployment/docker-compose.yml | 32 +++++++++++++++++-------- deployment/docker-stack.yml | 44 ----------------------------------- deployment/manage.sh | 43 ++++------------------------------ 3 files changed, 27 insertions(+), 92 deletions(-) delete mode 100644 deployment/docker-stack.yml diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml index 4641519..840dfc6 100644 --- a/deployment/docker-compose.yml +++ b/deployment/docker-compose.yml @@ -10,11 +10,6 @@ services: POSTGRES_MEMORY_LIMIT: ${POSTGRES_MEMORY_LIMIT:-1G} POSTGRES_INITDB_ARGS: "--encoding=UTF8 --locale=en_US.UTF-8" shm_size: 256m - deploy: - resources: - limits: - cpus: "2" - memory: ${POSTGRES_MEMORY_LIMIT:-1G} ports: - "${POSTGRES_PORT:-5433}:5432" volumes: @@ -26,12 +21,29 @@ services: timeout: 5s retries: 5 -networks: - default: - name: ${PROJECT_PREFIX:?error}-default - external: true + ponder: + image: ${PROJECT_PREFIX:?error}-ponder:${APP_REVISION:?error} + 
restart: unless-stopped + build: + context: .. + dockerfile: Dockerfile + args: + PIPELINE_BUILD_TAG: ${APP_REVISION} + environment: + DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:?error} + MAINNET_RPC_URL: ${MAINNET_RPC_URL:?error} + ports: + - "${PONDER_EXPOSED_PORT:-40000}:3000" + depends_on: + postgres: + condition: service_healthy + logging: + driver: json-file + options: + max-size: "50m" + max-file: "5" volumes: postgres-data: name: ${PROJECT_PREFIX:?error}-postgres-data - driver: local diff --git a/deployment/docker-stack.yml b/deployment/docker-stack.yml deleted file mode 100644 index 30e3b86..0000000 --- a/deployment/docker-stack.yml +++ /dev/null @@ -1,44 +0,0 @@ -x-ponder-deploy: &ponder-deploy - replicas: 1 - resources: - limits: - cpus: "2" - memory: ${PONDER_MEMORY_LIMIT:-2G} - reservations: - cpus: "0.5" - memory: 512M - update_config: - order: start-first - failure_action: rollback - restart_policy: - condition: on-failure - delay: 10s - max_attempts: 5 - window: 120s - -services: - ponder: - image: ${PROJECT_PREFIX:?error}-ponder:${APP_REVISION:?error} - build: - context: .. - dockerfile: Dockerfile - args: - PIPELINE_BUILD_TAG: ${APP_REVISION} - environment: - DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} - DATABASE_SCHEMA: ${DATABASE_SCHEMA:?error} - MAINNET_RPC_URL: ${MAINNET_RPC_URL:?error} - ports: - - "${PONDER_EXPOSED_PORT:-40000}:3000" - deploy: - <<: *ponder-deploy - logging: - driver: json-file - options: - max-size: "50m" - max-file: "5" - -networks: - default: - name: ${PROJECT_PREFIX:?error}-default - external: ${EXTERNAL_RESOURCES:-false} diff --git a/deployment/manage.sh b/deployment/manage.sh index 093f4a5..99d24c6 100755 --- a/deployment/manage.sh +++ b/deployment/manage.sh @@ -58,45 +58,15 @@ cmd_up() { fi echo ">>> Building ponder image..." 
- EXTERNAL_RESOURCES=true docker compose \ - -p "${PROJECT_PREFIX}-stack" -f docker-stack.yml \ - build --no-cache --build-arg BUILDKIT_INLINE_CACHE=1 - - # Ensure swarm overlay network exists - NETWORK_NAME="${PROJECT_PREFIX}-default" - echo ">>> Ensuring swarm network: ${NETWORK_NAME}..." - - NETWORK_SCOPE=$(docker network inspect "$NETWORK_NAME" --format '{{.Scope}}' 2>/dev/null || echo "none") - - if [[ "$NETWORK_SCOPE" == "local" ]]; then - echo ">>> Migrating local network to overlay..." - CONTAINERS=$(docker network inspect "$NETWORK_NAME" --format '{{range .Containers}}{{.Name}} {{end}}' 2>/dev/null || echo "") - for container in $CONTAINERS; do - docker stop "$container" 2>/dev/null || true - docker rm "$container" 2>/dev/null || true - done - docker compose -p "${PROJECT_PREFIX}" -f docker-compose.yml down --remove-orphans 2>/dev/null || true - docker network rm "$NETWORK_NAME" 2>/dev/null && NETWORK_SCOPE="none" || true - fi - - if [[ "$NETWORK_SCOPE" == "none" ]]; then - docker network create --driver overlay --attachable "$NETWORK_NAME" - fi + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + build --no-cache - echo ">>> Starting postgres..." + echo ">>> Deploying (DATABASE_SCHEMA=${DATABASE_SCHEMA})..." docker compose \ -p "${PROJECT_PREFIX}" -f docker-compose.yml \ up -d --remove-orphans - echo ">>> Waiting for postgres to be healthy..." - sleep 5s - - echo ">>> Deploying stack (DATABASE_SCHEMA=${DATABASE_SCHEMA})..." - EXTERNAL_RESOURCES=true docker stack deploy \ - --compose-file docker-stack.yml \ - --prune --detach --with-registry-auth --resolve-image never \ - "${PROJECT_PREFIX}" - echo ">>> Cleaning up old ponder images..." IMAGE_NAME="${PROJECT_PREFIX}-ponder" OLD_IMAGES=$(docker images --format "{{.Repository}}:{{.Tag}}" "$IMAGE_NAME" | grep -v ":${APP_REVISION}$" || true) @@ -110,10 +80,7 @@ cmd_up() { } cmd_down() { - echo ">>> Removing stack..." 
- EXTERNAL_RESOURCES=true docker stack rm "${PROJECT_PREFIX}" || true - - echo ">>> Stopping postgres..." + echo ">>> Stopping stack..." docker compose \ -p "${PROJECT_PREFIX}" -f docker-compose.yml \ down -v --remove-orphans || true From 988712e711d59dbac1b31eabe4111e3acee4781a Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 14:51:24 -0300 Subject: [PATCH 08/10] fix: add DISABLE_REMOVAL_POLL and DISABLE_SETTLEMENT_FACTORY_CHECK envs --- deployment/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml index 840dfc6..b33ec47 100644 --- a/deployment/docker-compose.yml +++ b/deployment/docker-compose.yml @@ -33,6 +33,8 @@ services: DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} DATABASE_SCHEMA: ${DATABASE_SCHEMA:?error} MAINNET_RPC_URL: ${MAINNET_RPC_URL:?error} + DISABLE_REMOVAL_POLL: ${DISABLE_REMOVAL_POLL:-false} + DISABLE_SETTLEMENT_FACTORY_CHECK: ${DISABLE_SETTLEMENT_FACTORY_CHECK:-false} ports: - "${PONDER_EXPOSED_PORT:-40000}:3000" depends_on: From dd755d85ba947eadd31c1ff4ac67e8c5928810ea Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Tue, 24 Mar 2026 19:03:46 -0300 Subject: [PATCH 09/10] fix: remove test branch from deploy config --- .github/workflows/deploy.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index e444519..93f2f37 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -9,7 +9,6 @@ on: push: branches: - main - - jefferson/cow-751-fix-m2-deploy jobs: deploy: From 829b0153290594abc11f96a078cd27f23cade4e7 Mon Sep 17 00:00:00 2001 From: Jefferson Bastos Date: Wed, 25 Mar 2026 12:04:54 -0300 Subject: [PATCH 10/10] feat: decode Trade log fields in AAVE_ADAPTER_MAPPED log --- src/application/handlers/settlement.ts | 27 ++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git 
a/src/application/handlers/settlement.ts b/src/application/handlers/settlement.ts index 767c180..5e60573 100644 --- a/src/application/handlers/settlement.ts +++ b/src/application/handlers/settlement.ts @@ -1,7 +1,7 @@ import { ponder } from "ponder:registry"; import { AddressType, ownerMapping, transaction } from "ponder:schema"; import { and, eq } from "ponder"; -import { keccak256, toBytes } from "viem"; +import { decodeAbiParameters, keccak256, toBytes } from "viem"; import { AaveV3AdapterHelperAbi } from "../../../abis/AaveV3AdapterHelperAbi"; import { AAVE_V3_ADAPTER_FACTORY_ADDRESSES, @@ -181,11 +181,34 @@ ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { }) .onConflictDoNothing(); + // Decode non-indexed Trade log fields: sellToken, buyToken, amounts, orderUid + const [sellToken, buyToken, sellAmount, buyAmount, , orderUid] = + decodeAbiParameters( + [ + { type: "address" }, + { type: "address" }, + { type: "uint256" }, + { type: "uint256" }, + { type: "uint256" }, + { type: "bytes" }, + ], + log.data, + ); + stats.mapped++; logStatsIfIntervalPassed(); console.log( - `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED adapter=${ownerAddress} eoa=${eoaOwner.toLowerCase()} block=${event.block.number} chain=${chainId}`, + `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED` + + ` adapter=${ownerAddress}` + + ` eoa=${eoaOwner.toLowerCase()}` + + ` orderUid=${orderUid}` + + ` sellToken=${sellToken.toLowerCase()}` + + ` buyToken=${buyToken.toLowerCase()}` + + ` sellAmount=${sellAmount}` + + ` buyAmount=${buyAmount}` + + ` block=${event.block.number}` + + ` chain=${chainId}`, ); }