diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..b5f9de5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,10 @@ +.git +node_modules +.env +.env.local +.vite +tmp +*.log +ponder.log +deployment/.env +generated diff --git a/.env.example b/.env.example index 0c1e315..755e53c 100644 --- a/.env.example +++ b/.env.example @@ -14,6 +14,28 @@ MAINNET_RPC_URL=https://eth.api.pocket.network # Database (optional — defaults to Ponder's built-in SQLite for dev) # Set this to use PostgreSQL from docker-compose.yml DATABASE_URL=postgresql://postgres:postgres@localhost:5432/programmatic-orders +# Schema for this app (required when using Postgres; avoids "previously used by a different Ponder app") +DATABASE_SCHEMA=programmatic_orders + +# Dev/local: reduce RPC usage during sync (set to 1 to disable) +# DISABLE_REMOVAL_POLL=true — skip multicall singleOrders (RemovalPoller) every N blocks +# DISABLE_SETTLEMENT_FACTORY_CHECK=true — skip getCode + FACTORY() calls in the GPv2Settlement:Trade +# handler entirely. Use to benchmark base sync throughput vs. the cost of those RPC calls. 
+ # Logging (optional) # PINO_LOG_LEVEL=info + +# ============================================================ +# Production deployment (deployment/.env on remote machine) +# ============================================================ +PROJECT_PREFIX=cow-programmatic +POSTGRES_USER=cow_programmatic +POSTGRES_PASSWORD= +POSTGRES_DB=cow_programmatic +POSTGRES_PORT=5433 +POSTGRES_MEMORY_LIMIT=1G +PONDER_EXPOSED_PORT=40000 +PONDER_MEMORY_LIMIT=2G +# DATABASE_SCHEMA is injected automatically by manage.sh as: +# programmatic_orders_<suffix> — do not set here diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..93f2f37 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,52 @@ +name: Deploy to Production + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + deploy: + name: Deploy to Production + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up SSH key + run: | + mkdir -p ~/.ssh + echo "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + + - name: Set up SSH config + run: | + cat <<EOF > ~/.ssh/config + Host cow-deploy + HostName ${{ secrets.DEPLOY_SERVER_HOST }} + User ${{ secrets.DEPLOY_SERVER_USER }} + Port 22 + IdentityFile ~/.ssh/id_ed25519 + StrictHostKeyChecking no + ServerAliveInterval 30 + ServerAliveCountMax 10 + TCPKeepAlive yes + EOF + chmod 600 ~/.ssh/config + + - name: Create .env file + run: | + echo "${{ secrets.DEPLOY_ENV_FILE_CONTENT }}" > .env + + - name: Run deploy script + run: | + cd deployment + bash deploy-remotely.sh \ + cow-deploy:${{ secrets.DEPLOY_PATH }} \ + ../.env diff --git a/.gitignore b/.gitignore index a40e084..633acb3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,9 @@ yarn-error.log* .DS_Store # Env files +.env .env.local +deployment/.env # Ponder /generated/
diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7fd307b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,38 @@ +FROM node:22-alpine AS base + +ENV PNPM_HOME="/pnpm" +ENV PATH="$PNPM_HOME:$PATH" +RUN npm install -g pnpm@10 + +WORKDIR /usr/src/app + +# ---- build stage ---- +FROM base AS build + +COPY package.json pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile + +COPY . . + +# ---- production image ---- +FROM base + +RUN apk add --no-cache curl + +ENV NODE_ENV=production + +COPY --from=build /usr/src/app ./ +RUN pnpm install --frozen-lockfile + +HEALTHCHECK \ + --start-period=24h \ + --start-interval=1s \ + --retries=3 \ + CMD curl -f http://localhost:3000/ready || exit 1 + +EXPOSE 3000/tcp + +CMD ["pnpm", "start"] + +ARG PIPELINE_BUILD_TAG="unknown" +ENV APP_REVISION=$PIPELINE_BUILD_TAG diff --git a/deployment/deploy-remotely.sh b/deployment/deploy-remotely.sh new file mode 100755 index 0000000..e06d0f7 --- /dev/null +++ b/deployment/deploy-remotely.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -exo pipefail + +REPO_ROOT_DIR=$(git rev-parse --show-toplevel) +APP_REVISION=$(git rev-parse --short HEAD) + +DEPLOY_TARGET="${1:-}" +ENV_FILE_PATH="${2:-.env}" + +if [[ -z "$DEPLOY_TARGET" ]]; then + echo "Usage: $0 [env_file_path]" + exit 1 +fi + +if [[ "$DEPLOY_TARGET" == "-" ]]; then + # Local deployment + TARGET_DEPLOY_DIR="$REPO_ROOT_DIR" + APP_DEPLOY_DIR="$TARGET_DEPLOY_DIR/deployment" + + bash "$APP_DEPLOY_DIR/manage.sh" ${MANAGE_CMD_OVERRIDE:-up} \ + --env-file "$ENV_FILE_PATH" \ + --revision "$APP_REVISION" +elif [[ "$DEPLOY_TARGET" =~ ^[^:]+:.+ ]]; then + # Remote deployment via SSH + SSH_HOST=$(echo "$DEPLOY_TARGET" | cut -d':' -f1) + REMOTE_PATH=$(echo "$DEPLOY_TARGET" | cut -d':' -f2-) + + # Sync repository to remote + # .env is excluded — copied separately via scp to preserve server secrets + rsync -avz --delete \ + --mkpath \ + --exclude='.git' \ + --exclude='node_modules' \ + --exclude='.env' \ + --exclude='.env.local' \ + 
--exclude='.vite' \ + --exclude='*.log' \ + --exclude='tmp/' \ + "$REPO_ROOT_DIR/" "$SSH_HOST:$REMOTE_PATH/" + + # Copy .env to deployment directory on remote (separate from rsync) + REMOTE_ENV_PATH="$REMOTE_PATH/deployment/.env" + scp "$ENV_FILE_PATH" "$SSH_HOST:$REMOTE_ENV_PATH" + + APP_DEPLOY_DIR="$REMOTE_PATH/deployment" + MANAGE_CMD="${MANAGE_CMD_OVERRIDE:-up}" + + # Run manage.sh on remote + ssh "$SSH_HOST" "cd $APP_DEPLOY_DIR && bash manage.sh $MANAGE_CMD --env-file .env --revision $APP_REVISION" +else + echo "Error: must be '-' or SSH_HOST:PATH" + exit 1 +fi diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml new file mode 100644 index 0000000..b33ec47 --- /dev/null +++ b/deployment/docker-compose.yml @@ -0,0 +1,51 @@ +services: + postgres: + image: postgres:16 + restart: unless-stopped + command: ["bash", "/start-db.sh"] + environment: + POSTGRES_DB: ${POSTGRES_DB:?error} + POSTGRES_USER: ${POSTGRES_USER:?error} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?error} + POSTGRES_MEMORY_LIMIT: ${POSTGRES_MEMORY_LIMIT:-1G} + POSTGRES_INITDB_ARGS: "--encoding=UTF8 --locale=en_US.UTF-8" + shm_size: 256m + ports: + - "${POSTGRES_PORT:-5433}:5432" + volumes: + - ./static/start-db.sh:/start-db.sh:ro + - postgres-data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + interval: 10s + timeout: 5s + retries: 5 + + ponder: + image: ${PROJECT_PREFIX:?error}-ponder:${APP_REVISION:?error} + restart: unless-stopped + build: + context: .. 
+ dockerfile: Dockerfile + args: + PIPELINE_BUILD_TAG: ${APP_REVISION} + environment: + DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:?error} + MAINNET_RPC_URL: ${MAINNET_RPC_URL:?error} + DISABLE_REMOVAL_POLL: ${DISABLE_REMOVAL_POLL:-false} + DISABLE_SETTLEMENT_FACTORY_CHECK: ${DISABLE_SETTLEMENT_FACTORY_CHECK:-false} + ports: + - "${PONDER_EXPOSED_PORT:-40000}:3000" + depends_on: + postgres: + condition: service_healthy + logging: + driver: json-file + options: + max-size: "50m" + max-file: "5" + +volumes: + postgres-data: + name: ${PROJECT_PREFIX:?error}-postgres-data diff --git a/deployment/manage.sh b/deployment/manage.sh new file mode 100755 index 0000000..99d24c6 --- /dev/null +++ b/deployment/manage.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<EOF +Usage: $0 <command> [options] + +Commands: + up Deploy the stack + down Tear down the stack + +Options: + -e, --env-file Path to .env file (required) + -r, --revision Application revision (required for 'up') + -h, --help Show this help message +EOF + exit 1 +} + +COMMAND="${1:-}" +shift || true + +ENV_FILE_PATH="" +APP_REVISION="" + +while [[ $# -gt 0 ]]; do + case "$1" in + -e|--env-file) ENV_FILE_PATH="$2"; shift 2 ;; + -r|--revision) APP_REVISION="$2"; shift 2 ;; + -h|--help) usage ;; + *) echo "Unknown option: $1"; usage ;; + esac +done + +if [[ -z "$COMMAND" ]]; then echo "Error: command required (up|down)"; usage; fi + +if [[ -z "$ENV_FILE_PATH" ]]; then echo "Error: --env-file required"; usage; fi + +APP_DEPLOY_DIR="$(dirname "$(realpath "$0")")" +cd "$APP_DEPLOY_DIR" + +set -a +source "$ENV_FILE_PATH" +set +a + +if [[ -z "${PROJECT_PREFIX:-}" ]]; then + echo "Error: PROJECT_PREFIX must be set in the env file" + exit 1 +fi + +export PROJECT_PREFIX +export APP_REVISION="${APP_REVISION:-latest}" +export DATABASE_SCHEMA="programmatic_orders" + +cmd_up() { + if [[ -z "${APP_REVISION:-}" || "$APP_REVISION" == "latest"
]]; then + echo "Error: --revision is required for 'up'" + exit 1 + fi + + echo ">>> Building ponder image..." + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + build --no-cache + + echo ">>> Deploying (DATABASE_SCHEMA=${DATABASE_SCHEMA})..." + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + up -d --remove-orphans + + echo ">>> Cleaning up old ponder images..." + IMAGE_NAME="${PROJECT_PREFIX}-ponder" + OLD_IMAGES=$(docker images --format "{{.Repository}}:{{.Tag}}" "$IMAGE_NAME" | grep -v ":${APP_REVISION}$" || true) + if [[ -n "$OLD_IMAGES" ]]; then + echo "$OLD_IMAGES" | xargs -r docker rmi 2>/dev/null || true + fi + docker image prune -f 2>/dev/null || true + docker container prune -f 2>/dev/null || true + + echo ">>> Deploy complete." +} + +cmd_down() { + echo ">>> Stopping stack..." + docker compose \ + -p "${PROJECT_PREFIX}" -f docker-compose.yml \ + down -v --remove-orphans || true +} + +case "$COMMAND" in + up) cmd_up ;; + down) cmd_down ;; + *) echo "Unknown command: $COMMAND"; usage ;; +esac diff --git a/deployment/static/start-db.sh b/deployment/static/start-db.sh new file mode 100755 index 0000000..3207dfc --- /dev/null +++ b/deployment/static/start-db.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +POSTGRES_MAX_CONNECTIONS="${POSTGRES_MAX_CONNECTIONS:-100}" + +if [ -n "${POSTGRES_MEMORY_LIMIT:-}" ]; then + LIMIT_BYTES=$(numfmt --from=iec "${POSTGRES_MEMORY_LIMIT}" 2>/dev/null) + if [ -z "$LIMIT_BYTES" ] || [ "$LIMIT_BYTES" = "0" ]; then + echo "Error: Invalid POSTGRES_MEMORY_LIMIT value: $POSTGRES_MEMORY_LIMIT" >&2 + exit 1 + fi + TOTAL_RAM_MB=$((LIMIT_BYTES / 1024 / 1024)) +else + TOTAL_RAM_KB=$(grep MemTotal /proc/meminfo | awk '{print $2}') + TOTAL_RAM_MB=$((TOTAL_RAM_KB / 1024)) +fi + +SHARED_BUFFERS_MB=$((TOTAL_RAM_MB * 20 / 100)) +MAINTENANCE_WORK_MEM_MB=$((TOTAL_RAM_MB * 5 / 100)) +EFFECTIVE_CACHE_SIZE_MB=$((TOTAL_RAM_MB / 2)) +WORK_MEM_MB=$(( (TOTAL_RAM_MB * 25 / 100) / POSTGRES_MAX_CONNECTIONS )) + +if [ 
"$WORK_MEM_MB" -lt 1 ]; then WORK_MEM_MB=1; fi +if [ "$SHARED_BUFFERS_MB" -lt 32 ]; then SHARED_BUFFERS_MB=32; fi +if [ "$MAINTENANCE_WORK_MEM_MB" -lt 16 ]; then MAINTENANCE_WORK_MEM_MB=16; fi + +set -x +exec docker-entrypoint.sh \ + -c "max_connections=${POSTGRES_MAX_CONNECTIONS}" \ + -c "shared_buffers=${SHARED_BUFFERS_MB}MB" \ + -c "work_mem=${WORK_MEM_MB}MB" \ + -c "maintenance_work_mem=${MAINTENANCE_WORK_MEM_MB}MB" \ + -c "effective_cache_size=${EFFECTIVE_CACHE_SIZE_MB}MB" \ + -c "max_wal_size=1GB" \ + -c "min_wal_size=256MB" \ + -c "checkpoint_completion_target=0.9" \ + -c "wal_buffers=8MB" diff --git a/package.json b/package.json index 9b2aa51..0b58b7e 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,7 @@ "type": "module", "scripts": { "dev": "ponder dev", - "start": "ponder start --schema ${DATABASE_SCHEMA:-public}", + "start": "ponder start -p 3000 --schema ${DATABASE_SCHEMA:-public}", "db": "ponder db", "codegen": "ponder codegen", "lint": "eslint . --ext .ts", diff --git a/ponder.config.ts b/ponder.config.ts index cebaba8..9c63bac 100644 --- a/ponder.config.ts +++ b/ponder.config.ts @@ -3,6 +3,7 @@ import { ComposableCowContract, COMPOSABLE_COW_DEPLOYMENTS, CoWShedFactoryContract, + FLASH_LOAN_ROUTER_ADDRESSES, GPv2SettlementContract, } from "./src/data"; @@ -16,7 +17,13 @@ export default createConfig({ contracts: { ComposableCow: ComposableCowContract, CoWShedFactory: CoWShedFactoryContract, - GPv2Settlement: GPv2SettlementContract, + GPv2Settlement: { + ...GPv2SettlementContract, + filter: { + event: "Settlement", + args: { solver: FLASH_LOAN_ROUTER_ADDRESSES.mainnet }, + }, + }, }, blocks: { RemovalPoller: { diff --git a/src/application/handlers/settlement.ts b/src/application/handlers/settlement.ts index 8116fae..5e60573 100644 --- a/src/application/handlers/settlement.ts +++ b/src/application/handlers/settlement.ts @@ -1,78 +1,216 @@ import { ponder } from "ponder:registry"; import { AddressType, ownerMapping, transaction } from 
"ponder:schema"; import { and, eq } from "ponder"; +import { decodeAbiParameters, keccak256, toBytes } from "viem"; import { AaveV3AdapterHelperAbi } from "../../../abis/AaveV3AdapterHelperAbi"; -import { AAVE_V3_ADAPTER_FACTORY_ADDRESS } from "../../data"; +import { + AAVE_V3_ADAPTER_FACTORY_ADDRESSES, + GPV2_SETTLEMENT_DEPLOYMENTS, +} from "../../data"; + +// Trade(address,address,address,uint256,uint256,uint256,bytes) — topic0 hash +const TRADE_TOPIC = keccak256( + toBytes("Trade(address,address,address,uint256,uint256,uint256,bytes)"), +); + +// ── Stats / timing ──────────────────────────────────────────────────────────── +// Logged every LOG_INTERVAL_MS to measure per-step cost without flooding logs. +const stats = { + total: 0, // Settlement events processed + tradeLogsFound: 0, // Trade logs found in receipts + skippedAlreadyMapped: 0, + skippedEOA: 0, + skippedNotAdapter: 0, + mapped: 0, + msFactory: 0, +}; +let statsLastLogAt = Date.now(); +const LOG_INTERVAL_MS = 30_000; + +function logStatsIfIntervalPassed() { + if (Date.now() - statsLastLogAt < LOG_INTERVAL_MS) return; + const contractAddresses = + stats.tradeLogsFound - stats.skippedAlreadyMapped - stats.skippedEOA; + console.log( + `[SETTLEMENT:STATS] settlements=${stats.total}` + + ` tradeLogs=${stats.tradeLogsFound}` + + ` alreadyMapped=${stats.skippedAlreadyMapped}` + + ` eoa=${stats.skippedEOA}` + + ` notAdapter=${stats.skippedNotAdapter}` + + ` mapped=${stats.mapped}` + + ` | avgFactory=${contractAddresses > 0 ? (stats.msFactory / contractAddresses).toFixed(1) : 0}ms`, + ); + statsLastLogAt = Date.now(); +} + +// FACTORY() selector — keccak256("FACTORY()")[0:4], confirmed from RPC logs. +// Using raw eth_call instead of readContract to avoid Ponder's WARN on revert, +// which floods the log since non-adapter contracts do not implement FACTORY(). 
+const FACTORY_SELECTOR = "0x2dd31000" as const; + +ponder.on("GPv2Settlement:Settlement", async ({ event, context }) => { + // Kill switch: set DISABLE_SETTLEMENT_FACTORY_CHECK=true to skip all RPC + // calls in this handler. Use to benchmark base throughput vs. factory cost. + if (process.env.DISABLE_SETTLEMENT_FACTORY_CHECK === "true") return; -ponder.on("GPv2Settlement:Trade", async ({ event, context }) => { - const { owner } = event.args; - const ownerAddress = owner.toLowerCase() as `0x${string}`; const chainId = context.chain.id; + const chainName = context.chain.name; + + // Resolve chain-specific addresses — skip safely if chain is not configured + const settlementDeployment = + GPV2_SETTLEMENT_DEPLOYMENTS[ + chainName as keyof typeof GPV2_SETTLEMENT_DEPLOYMENTS + ]; + if (!settlementDeployment) return; + const settlementAddress = settlementDeployment.address.toLowerCase(); + + const adapterFactoryAddress = + AAVE_V3_ADAPTER_FACTORY_ADDRESSES[ + chainName as keyof typeof AAVE_V3_ADAPTER_FACTORY_ADDRESSES + ]?.toLowerCase(); + if (!adapterFactoryAddress) return; + + stats.total++; + + // Fetch the full receipt to access all logs in the transaction. + // Volume is negligible (FlashLoanRouter settlements only), so the extra RPC + // call per settlement is acceptable and much cheaper than the old per-trade approach. 
+ const receipt = await context.client.getTransactionReceipt({ + hash: event.transaction.hash, + }); + + for (const log of receipt.logs) { + // Only Trade logs emitted by GPv2Settlement in this same transaction + if (log.address.toLowerCase() !== settlementAddress) continue; + if (log.topics[0] !== TRADE_TOPIC) continue; + + stats.tradeLogsFound++; + + // Decode owner from topics[1] — ABI-encoded 32-byte padded address + const owner = `0x${log.topics[1]!.slice(26)}` as `0x${string}`; + const ownerAddress = owner.toLowerCase() as `0x${string}`; - // Skip if already mapped (adapter seen in a prior trade) - const existing = await context.db.sql - .select() - .from(ownerMapping) - .where( - and( - eq(ownerMapping.chainId, chainId), - eq(ownerMapping.address, ownerAddress), - ), - ) - .limit(1); - - if (existing.length > 0) return; - - // Skip if EOA (no bytecode) - const code = await context.client.getCode({ address: owner }); - if (!code || code === "0x") return; - - // Check for Aave adapter via FACTORY() — silently skip if call reverts - let factoryAddress: `0x${string}`; - try { - factoryAddress = await context.client.readContract({ + // Skip if already mapped (adapter seen in a prior settlement) + const existing = await context.db.sql + .select() + .from(ownerMapping) + .where( + and( + eq(ownerMapping.chainId, chainId), + eq(ownerMapping.address, ownerAddress), + ), + ) + .limit(1); + + if (existing.length > 0) { + stats.skippedAlreadyMapped++; + logStatsIfIntervalPassed(); + continue; + } + + // Skip if EOA (no bytecode) + const code = await context.client.getCode({ address: owner }); + if (!code || code === "0x") { + stats.skippedEOA++; + logStatsIfIntervalPassed(); + continue; + } + + // Check for Aave adapter via raw eth_call. + // readContract() is intentionally avoided here: Ponder logs a WARN for every + // revert, and FACTORY() reverts on any non-adapter contract. 
+ const t1 = Date.now(); + let factoryData: `0x${string}` | undefined; + try { + const result = await context.client.call({ + to: owner, + data: FACTORY_SELECTOR, + }); + factoryData = result.data; + } catch { + stats.msFactory += Date.now() - t1; + stats.skippedNotAdapter++; + logStatsIfIntervalPassed(); + continue; + } + stats.msFactory += Date.now() - t1; + + // ABI-encoded address = 32 bytes = 66 hex chars (including 0x prefix) + if (!factoryData || factoryData.length < 66) { + stats.skippedNotAdapter++; + logStatsIfIntervalPassed(); + continue; + } + + // Decode padded address: 0x + 24 zero-padding hex chars + 40 address hex chars + const factoryAddress = `0x${factoryData.slice(26)}` as `0x${string}`; + + if (factoryAddress.toLowerCase() !== adapterFactoryAddress) { + stats.skippedNotAdapter++; + logStatsIfIntervalPassed(); + continue; + } + + // Resolve EOA via owner() — this call should always succeed at this point + const eoaOwner = await context.client.readContract({ address: owner, abi: AaveV3AdapterHelperAbi, - functionName: "FACTORY", + functionName: "owner", }); - } catch { - // Not an Aave adapter (Safe, other ERC-1271 signer, etc.) 
- return; - } - if (factoryAddress.toLowerCase() !== AAVE_V3_ADAPTER_FACTORY_ADDRESS) return; + await context.db + .insert(transaction) + .values({ + hash: event.transaction.hash, + chainId, + blockNumber: event.block.number, + blockTimestamp: event.block.timestamp, + }) + .onConflictDoNothing(); - // Resolve EOA via owner() - const eoaOwner = await context.client.readContract({ - address: owner, - abi: AaveV3AdapterHelperAbi, - functionName: "owner", - }); + await context.db + .insert(ownerMapping) + .values({ + chainId, + address: ownerAddress, + owner: eoaOwner.toLowerCase() as `0x${string}`, + addressType: AddressType.FlashLoanHelper, + txHash: event.transaction.hash, + blockNumber: event.block.number, + resolutionDepth: 1, + }) + .onConflictDoNothing(); - await context.db - .insert(transaction) - .values({ - hash: event.transaction.hash, - chainId, - blockNumber: event.block.number, - blockTimestamp: event.block.timestamp, - }) - .onConflictDoNothing(); - - await context.db - .insert(ownerMapping) - .values({ - chainId, - address: ownerAddress, - owner: eoaOwner.toLowerCase() as `0x${string}`, - addressType: AddressType.FlashLoanHelper, - txHash: event.transaction.hash, - blockNumber: event.block.number, - resolutionDepth: 1, - }) - .onConflictDoNothing(); + // Decode non-indexed Trade log fields: sellToken, buyToken, amounts, orderUid + const [sellToken, buyToken, sellAmount, buyAmount, , orderUid] = + decodeAbiParameters( + [ + { type: "address" }, + { type: "address" }, + { type: "uint256" }, + { type: "uint256" }, + { type: "uint256" }, + { type: "bytes" }, + ], + log.data, + ); - console.log( - `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED adapter=${ownerAddress} eoa=${eoaOwner.toLowerCase()} block=${event.block.number} chain=${chainId}`, - ); + stats.mapped++; + logStatsIfIntervalPassed(); + + console.log( + `[COW:SETTLEMENT:TRADE] AAVE_ADAPTER_MAPPED` + + ` adapter=${ownerAddress}` + + ` eoa=${eoaOwner.toLowerCase()}` + + ` orderUid=${orderUid}` + + ` 
sellToken=${sellToken.toLowerCase()}` + + ` buyToken=${buyToken.toLowerCase()}` + + ` sellAmount=${sellAmount}` + + ` buyAmount=${buyAmount}` + + ` block=${event.block.number}` + + ` chain=${chainId}`, + ); + } + + logStatsIfIntervalPassed(); }); diff --git a/src/data.ts b/src/data.ts index 8aed501..1a13ca6 100644 --- a/src/data.ts +++ b/src/data.ts @@ -40,13 +40,13 @@ export const CoWShedFactoryContract = { /** * GPv2Settlement — mainnet only. - * Start block 17883049 (ComposableCoW genesis), not 12593265 (Settlement genesis), - * to avoid syncing 2+ years of unrelated trades. + * + * Start block = AaveV3AdapterFactory deployment block, NOT ComposableCoW genesis. */ export const GPV2_SETTLEMENT_DEPLOYMENTS = { mainnet: { address: "0x9008D19f58AAbD9eD0D60971565AA8510560ab41" as const, - startBlock: 17883049, + startBlock: 23812751, // AaveV3AdapterFactory deployment block (Nov 16, 2025) }, } as const; @@ -60,8 +60,21 @@ export const GPv2SettlementContract = { /** * AaveV3AdapterFactory — deploys per-user flash loan adapter proxies. * Detection: call FACTORY() on a contract; if it returns this address, it is an Aave adapter. - * Same address across all chains (CREATE2 deterministic deployment). * Not a Ponder-indexed contract — used for view calls only. */ -export const AAVE_V3_ADAPTER_FACTORY_ADDRESS = - "0xdeCc46a4b09162f5369c5c80383aaa9159bcf192" as const; +export const AAVE_V3_ADAPTER_FACTORY_ADDRESSES = { + mainnet: "0xdeCc46a4b09162f5369c5c80383aaa9159bcf192" as const, + // gnosis: "0x...", // TODO: verify + // arbitrum: "0x...", // TODO: verify +} as const; + +/** + * FlashLoanRouter — the CoW Protocol solver that submits all Aave flash loan settlements. + * Confirmed via ROUTER() on AaveV3AdapterFactory (immutable variable, cannot change). + * Used to filter GPv2Settlement:Settlement events to only those involving flash loans. 
+ */ +export const FLASH_LOAN_ROUTER_ADDRESSES = { + mainnet: "0x9da8B48441583a2b93e2eF8213aAD0EC0b392C69" as const, + // gnosis: "0x...", // TODO: confirm via ROUTER() on gnosis AaveV3AdapterFactory + // arbitrum: "0x...", // TODO: confirm via ROUTER() on arbitrum AaveV3AdapterFactory +} as const;