diff --git a/.env b/.env index d691898..8cd5d87 100644 --- a/.env +++ b/.env @@ -21,14 +21,17 @@ # rewards-eligibility REO eligibility oracle node # indexing-payments dipper + iisa (requires GHCR auth — see README) # Default: profiles that work out of the box. -COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer +COMPOSE_PROFILES=block-oracle # All profiles (indexing-payments requires GHCR auth — see README): #COMPOSE_PROFILES=rewards-eligibility,block-oracle,explorer,indexing-payments # --- Dev overrides --- # Uncomment and extend to build services from local source. # See compose/dev/README.md for available overrides. -#COMPOSE_FILE=docker-compose.yaml:compose/dev/graph-node.yaml +COMPOSE_FILE=docker-compose.yaml:compose/dev/dips.yaml +INDEXER_SERVICE_SOURCE_ROOT=../indexer-rs +INDEXER_AGENT_SOURCE_ROOT=../indexer +DIPPER_SOURCE_ROOT=../dipper # indexer components versions GRAPH_NODE_VERSION=v0.37.0 @@ -51,7 +54,7 @@ ELIGIBILITY_ORACLE_COMMIT=84710857394d3419f83dcbf6687a91f415cc1625 # network components versions BLOCK_ORACLE_COMMIT=3a3a425ff96130c3842cee7e43d06bbe3d729aed -CONTRACTS_COMMIT=0003fe3adf7394df5c9ac1cf4ef600c96a61259f +CONTRACTS_COMMIT=63094533b29b3a0628824d6e6eaa0f05e14f57b6 NETWORK_SUBGRAPH_COMMIT=5b6c22089a2e55db16586a19cbf6e1d73a93c7b9 TAP_CONTRACTS_COMMIT=e3351e70b3e5d9821bc0aaa90bb2173ca2a77af7 TAP_SUBGRAPH_COMMIT=cf7279f60433bf9a9d897ec2548c13c0607234cc @@ -99,6 +102,7 @@ BLOCK_EXPLORER=${BLOCK_EXPLORER_PORT} # Indexing Payments (used with indexing-payments override) DIPPER_ADMIN_RPC_PORT=9000 DIPPER_INDEXER_RPC_PORT=9001 +INDEXER_SERVICE_DIPS_RPC_PORT=7602 ## Chain config CHAIN_ID=1337 @@ -124,7 +128,7 @@ SUBGRAPH_2="9p1TRzaccKzWBN4P6YEwEUxYwJn6HwPxf5dKXK2NYxgS" # REO (Rewards Eligibility Oracle) # Set to 1 to deploy and configure the REO contract (Phase 4). Unset or 0 to skip. 
-REO_ENABLED=1 +REO_ENABLED=0 # eligibilityPeriod: how long an indexer stays eligible after renewal (seconds) REO_ELIGIBILITY_PERIOD=300 # oracleUpdateTimeout: fail-safe — if no oracle update for this long, all indexers eligible (seconds) diff --git a/compose/dev/README.md b/compose/dev/README.md index b21b5cc..1379b5b 100644 --- a/compose/dev/README.md +++ b/compose/dev/README.md @@ -31,5 +31,6 @@ Then `docker compose up -d` applies the overrides automatically. | `eligibility-oracle.yaml` | eligibility-oracle-node | `REO_BINARY` | | `dipper.yaml` | dipper | `DIPPER_BINARY` | | `iisa.yaml` | iisa | `IISA_VERSION=local` | +| `dips.yaml` | indexer-service, indexer-agent | `INDEXER_SERVICE_SOURCE_ROOT`, `INDEXER_AGENT_SOURCE_ROOT` | See each file's header comments for details. diff --git a/compose/dev/dips.yaml b/compose/dev/dips.yaml new file mode 100644 index 0000000..ed39a17 --- /dev/null +++ b/compose/dev/dips.yaml @@ -0,0 +1,78 @@ +# DIPs Development Override +# +# Overrides indexer-service and indexer-agent for DIPs development: +# - indexer-service: built from local source with [dips] config section +# - indexer-agent: hot-reload from local source with DIPs env vars +# +# Prerequisites: +# - Sibling checkouts: ../indexer-rs, ../indexer, ../dipper +# - indexing-payments profile enabled (for dipper service) +# +# Activate via .env: +# COMPOSE_PROFILES=indexing-payments,block-oracle +# COMPOSE_FILE=docker-compose.yaml:compose/dev/dips.yaml +# INDEXER_SERVICE_SOURCE_ROOT=../indexer-rs +# INDEXER_AGENT_SOURCE_ROOT=../indexer +# DIPPER_SOURCE_ROOT=../dipper + +services: + indexer-service: + build: + target: "wrapper" + dockerfile_inline: | + FROM rust:1-slim-bookworm AS wrapper + RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential curl git jq pkg-config \ + protobuf-compiler libssl-dev libsasl2-dev \ + && rm -rf /var/lib/apt/lists/* + entrypoint: ["bash", "/opt/run-dips.sh"] + volumes: + - ${INDEXER_SERVICE_SOURCE_ROOT:?Set 
INDEXER_SERVICE_SOURCE_ROOT to local indexer-rs checkout}:/opt/source + - ./containers/indexer/indexer-service/dev/run-dips.sh:/opt/run-dips.sh:ro + - ./containers/shared:/opt/shared:ro + - ./.env:/opt/config/.env:ro + - config-local:/opt/config:ro + ports: + - "${INDEXER_SERVICE_PORT}:7601" + - "${INDEXER_SERVICE_DIPS_RPC_PORT}:7602" + environment: + RUST_LOG: info,indexer_service_rs=info,indexer_monitor=debug,indexer_dips=debug + RUST_BACKTRACE: 1 + SQLX_OFFLINE: "true" + healthcheck: + interval: 10s + retries: 600 + test: curl -f http://127.0.0.1:7601/ + + indexer-agent: + entrypoint: ["bash", "-cl", "/opt/run-dips.sh"] + ports: + - "${INDEXER_MANAGEMENT_PORT}:7600" + - 9230:9230 + volumes: + - ./containers/indexer/indexer-agent/dev/run-dips.sh:/opt/run-dips.sh:ro + - ${INDEXER_AGENT_SOURCE_ROOT:?Set INDEXER_AGENT_SOURCE_ROOT to local indexer checkout}:/opt/indexer-agent-source-root + + dipper: + profiles: [] + build: + dockerfile_inline: | + FROM rust:1-slim-bookworm + RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential ca-certificates clang cmake curl git jq lld \ + pkg-config libssl-dev protobuf-compiler \ + && rm -rf /var/lib/apt/lists/* + ENV CC=clang CXX=clang++ RUSTFLAGS="-C link-arg=-fuse-ld=lld" + entrypoint: ["bash", "/opt/run.sh"] + depends_on: + block-oracle: { condition: service_healthy } + postgres: { condition: service_healthy } + gateway: { condition: service_healthy } + volumes: + - ${DIPPER_SOURCE_ROOT:?Set DIPPER_SOURCE_ROOT to local dipper checkout}:/opt/source + - ./containers/indexing-payments/dipper/run.sh:/opt/run.sh:ro + - ./containers/shared:/opt/shared:ro + - ./.env:/opt/config/.env:ro + - config-local:/opt/config:ro diff --git a/containers/indexer/indexer-agent/dev/run-dips.sh b/containers/indexer/indexer-agent/dev/run-dips.sh new file mode 100755 index 0000000..0104530 --- /dev/null +++ b/containers/indexer/indexer-agent/dev/run-dips.sh @@ -0,0 +1,89 @@ +#!/bin/bash +set -xeu +. 
/opt/config/.env + +. /opt/shared/lib.sh + +token_address=$(contract_addr L2GraphToken.address horizon) +staking_address=$(contract_addr HorizonStaking.address horizon) +indexer_staked="$(cast call "--rpc-url=http://chain:${CHAIN_RPC_PORT}" \ + "${staking_address}" 'hasStake(address) (bool)' "${RECEIVER_ADDRESS}")" +echo "indexer_staked=${indexer_staked}" +if [ "${indexer_staked}" = "false" ]; then + cast send "--rpc-url=http://chain:${CHAIN_RPC_PORT}" --confirmations=0 "--mnemonic=${MNEMONIC}" \ + --value=1ether "${RECEIVER_ADDRESS}" + cast send "--rpc-url=http://chain:${CHAIN_RPC_PORT}" --confirmations=0 "--mnemonic=${MNEMONIC}" \ + "${token_address}" 'transfer(address,uint256)' "${RECEIVER_ADDRESS}" '100000000000000000000000' + cast send "--rpc-url=http://chain:${CHAIN_RPC_PORT}" --confirmations=0 "--private-key=${RECEIVER_SECRET}" \ + "${token_address}" 'approve(address,uint256)' "${staking_address}" '100000000000000000000000' + cast send "--rpc-url=http://chain:${CHAIN_RPC_PORT}" --confirmations=0 "--private-key=${RECEIVER_SECRET}" \ + "${staking_address}" 'stake(uint256)' '100000000000000000000000' +fi + +export INDEXER_AGENT_HORIZON_ADDRESS_BOOK=/opt/config/horizon.json +export INDEXER_AGENT_SUBGRAPH_SERVICE_ADDRESS_BOOK=/opt/config/subgraph-service.json +export INDEXER_AGENT_TAP_ADDRESS_BOOK=/opt/config/tap-contracts.json +export INDEXER_AGENT_EPOCH_SUBGRAPH_ENDPOINT="http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/block-oracle" +export INDEXER_AGENT_GATEWAY_ENDPOINT="http://gateway:${GATEWAY_PORT}" +export INDEXER_AGENT_GRAPH_NODE_QUERY_ENDPOINT="http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}" +export INDEXER_AGENT_GRAPH_NODE_ADMIN_ENDPOINT="http://graph-node:${GRAPH_NODE_ADMIN_PORT}" +export INDEXER_AGENT_GRAPH_NODE_STATUS_ENDPOINT="http://graph-node:${GRAPH_NODE_STATUS_PORT}/graphql" +export INDEXER_AGENT_IPFS_ENDPOINT="http://ipfs:${IPFS_RPC_PORT}" +export INDEXER_AGENT_INDEXER_ADDRESS="${RECEIVER_ADDRESS}" +export 
INDEXER_AGENT_INDEXER_MANAGEMENT_PORT="${INDEXER_MANAGEMENT_PORT}" +export INDEXER_AGENT_INDEX_NODE_IDS=default +export INDEXER_AGENT_INDEXER_GEO_COORDINATES="1 1" +export INDEXER_AGENT_VOUCHER_REDEMPTION_THRESHOLD=0.01 +export INDEXER_AGENT_NETWORK_SUBGRAPH_ENDPOINT="http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" +export INDEXER_AGENT_NETWORK_PROVIDER="http://chain:${CHAIN_RPC_PORT}" +export INDEXER_AGENT_MNEMONIC="${INDEXER_MNEMONIC}" +export INDEXER_AGENT_POSTGRES_DATABASE=indexer_components_1 +export INDEXER_AGENT_POSTGRES_HOST=postgres +export INDEXER_AGENT_POSTGRES_PORT="${POSTGRES_PORT}" +export INDEXER_AGENT_POSTGRES_USERNAME=postgres +export INDEXER_AGENT_POSTGRES_PASSWORD= +export INDEXER_AGENT_PUBLIC_INDEXER_URL="http://indexer-service:${INDEXER_SERVICE_PORT}" +export INDEXER_AGENT_TAP_SUBGRAPH_ENDPOINT="http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/semiotic/tap" +export INDEXER_AGENT_MAX_PROVISION_INITIAL_SIZE=200000 +export INDEXER_AGENT_CONFIRMATION_BLOCKS=1 +export INDEXER_AGENT_LOG_LEVEL=trace +export INDEXER_AGENT_POLLING_INTERVAL=10000 + +# DIPs configuration +export INDEXER_AGENT_ENABLE_DIPS=true +export INDEXER_AGENT_DIPS_EPOCHS_MARGIN=1 +export INDEXER_AGENT_DIPPER_ENDPOINT="http://dipper:${DIPPER_INDEXER_RPC_PORT}" +export INDEXER_AGENT_DIPS_ALLOCATION_AMOUNT=1 + +cd /opt/indexer-agent-source-root +mkdir -p ./config/ +cat >./config/config.yaml <<-EOF +networkIdentifier: "hardhat" +indexerOptions: + geoCoordinates: [48.4682, -123.524] + defaultAllocationAmount: 10000 + allocationManagementMode: "auto" + restakeRewards: true + poiDisputeMonitoring: false + voucherRedemptionThreshold: 0.00001 + voucherRedemptionBatchThreshold: 10 + rebateClaimThreshold: 0.00001 + rebateClaimBatchThreshold: 10 +subgraphs: + maxBlockDistance: 5000 + freshnessSleepMilliseconds: 1000 +enableDips: true +dipperEndpoint: "http://dipper:${DIPPER_INDEXER_RPC_PORT}" +dipsAllocationAmount: 1 +dipsEpochsMargin: 1 +EOF +cat 
config/config.yaml + +nodemon --watch . \ +--ext ts \ +--legacy-watch \ +--delay 4 \ +--verbose \ +--exec " +NODE_OPTIONS=\"--inspect=0.0.0.0:9230\" +tsx packages/indexer-agent/src/index.ts start" diff --git a/containers/indexer/indexer-service/dev/run-dips.sh b/containers/indexer/indexer-service/dev/run-dips.sh new file mode 100755 index 0000000..e05815d --- /dev/null +++ b/containers/indexer/indexer-service/dev/run-dips.sh @@ -0,0 +1,78 @@ +#!/bin/bash +set -eu + +. /opt/config/.env +. /opt/shared/lib.sh + +tap_verifier=$(contract_addr TAPVerifier tap-contracts) +graph_tally_verifier=$(contract_addr GraphTallyCollector.address horizon) +subgraph_service=$(contract_addr SubgraphService.address subgraph-service) +recurring_collector=$(contract_addr RecurringCollector.address horizon) + +cat >/opt/config.toml <<-EOF +[indexer] +indexer_address = "${RECEIVER_ADDRESS}" +operator_mnemonic = "${INDEXER_MNEMONIC}" + +[database] +postgres_url = "postgresql://postgres@postgres:${POSTGRES_PORT}/indexer_components_1" + +[graph_node] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}" +status_url = "http://graph-node:${GRAPH_NODE_STATUS_PORT}/graphql" + +[subgraphs.network] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/graph-network" +recently_closed_allocation_buffer_secs = 60 +syncing_interval_secs = 30 + +[subgraphs.escrow] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL_PORT}/subgraphs/name/semiotic/tap" +syncing_interval_secs = 30 + +[blockchain] +chain_id = 1337 +receipts_verifier_address = "${tap_verifier}" +receipts_verifier_address_v2 = "${graph_tally_verifier}" +subgraph_service_address = "${subgraph_service}" + +[service] +free_query_auth_token = "freestuff" +host_and_port = "0.0.0.0:${INDEXER_SERVICE_PORT}" +url_prefix = "/" +serve_network_subgraph = false +serve_escrow_subgraph = false +ipfs_url = "http://ipfs:${IPFS_RPC_PORT}" + +[tap] +max_amount_willing_to_lose_grt = 1 + +[tap.rav_request] +timestamp_buffer_secs = 15 + 
+[tap.sender_aggregator_endpoints] +${ACCOUNT0_ADDRESS} = "http://tap-aggregator:${TAP_AGGREGATOR_PORT}" + +[horizon] +enabled = true + +[dips] +host = "0.0.0.0" +port = "${INDEXER_SERVICE_DIPS_RPC_PORT}" +recurring_collector = "${recurring_collector}" +allowed_payers = ["${ACCOUNT0_ADDRESS}"] + +price_per_entity = "1000" + +[dips.price_per_epoch] +"eip155:1" = "100" +"eip155:1337" = "100" + +[dips.additional_networks] +"eip155:1337" = "hardhat" +EOF +cat /opt/config.toml + +cd /opt/source +cargo build --bin indexer-service-rs +exec ./target/debug/indexer-service-rs --config=/opt/config.toml diff --git a/containers/indexing-payments/dipper/run.sh b/containers/indexing-payments/dipper/run.sh index edd9f9d..e646012 100755 --- a/containers/indexing-payments/dipper/run.sh +++ b/containers/indexing-payments/dipper/run.sh @@ -79,4 +79,11 @@ echo "=== Generated config.json ===" >&2 cat config.json >&2 echo "===========================" >&2 -dipper-service ./config.json +# Build from source if mounted, otherwise use pre-built binary +if [ -d /opt/source ] && [ -f /opt/source/Cargo.toml ]; then + cd /opt/source + cargo build --bin dipper-service --release + exec ./target/release/dipper-service "$OLDPWD/config.json" +else + exec dipper-service ./config.json +fi diff --git a/scripts/add-subgraph.sh b/scripts/add-subgraph.sh index 0b5d4f8..4ebbf51 100755 --- a/scripts/add-subgraph.sh +++ b/scripts/add-subgraph.sh @@ -23,7 +23,7 @@ deployment_hex="$(curl -s -X POST "http://${IPFS_HOST}:${IPFS_RPC_PORT}/api/v0/c deployment_hex="${deployment_hex#f01701220}" echo "deployment_hex=${deployment_hex}" -gns="$(jq -r '."1337".L2GNS.address' subgraph-service.json)" +gns="$(docker exec graph-node cat /opt/config/subgraph-service.json | jq -r '."1337".L2GNS.address')" # https://github.com/graphprotocol/contracts/blob/3eb16c80d4652c238d3e6b2c396da712af5072b4/packages/sdk/src/deployments/network/actions/gns.ts#L38 cast send --rpc-url="http://${CHAIN_HOST}:${CHAIN_RPC_PORT}" --confirmations=0 
--mnemonic="${MNEMONIC}" \ diff --git a/scripts/test-dips.sh b/scripts/test-dips.sh new file mode 100755 index 0000000..9dc8d30 --- /dev/null +++ b/scripts/test-dips.sh @@ -0,0 +1,703 @@ +#!/bin/bash +# DIPs integration tests for indexer-agent. +# +# Tests that the indexer-agent correctly reads pending RCA proposals from +# the pending_rca_proposals table and creates/skips indexing rules. +# +# Prerequisites: +# - Local network running with DIPs profile (compose/dev/dips.yaml) +# - indexer-agent healthy and connected +# - pending_rca_proposals table exists (migration 23) +# +# Usage: ./scripts/test-dips.sh +set -eu + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# shellcheck source=../.env +. "$REPO_ROOT/.env" +[ -f "$REPO_ROOT/.env.local" ] && . "$REPO_ROOT/.env.local" + +AGENT_URL="http://${INDEXER_AGENT_HOST:-localhost}:${INDEXER_MANAGEMENT_PORT:-7600}" +PG_HOST="${POSTGRES_HOST:-localhost}" +PG_PORT="${POSTGRES_PORT:-5432}" +PG_DB="indexer_components_1" +PG_USER="postgres" +PGCMD="psql -h $PG_HOST -p $PG_PORT -U $PG_USER -d $PG_DB -tAq" + +export PATH="$HOME/.foundry/bin:$PATH" + +pass=0 +fail=0 +total=0 + +# ── Helpers ─────────────────────────────────────────────────────────── + +check() { + local label="$1" + local condition="$2" + total=$((total + 1)) + + if eval "$condition" > /dev/null 2>&1; then + echo " PASS $label" + pass=$((pass + 1)) + return 0 + else + echo " FAIL $label" + fail=$((fail + 1)) + return 1 + fi +} + +# Convert bytes32 (0x-prefixed, 64 hex chars) to IPFS CIDv0 (Qm...). +# Prepends the multihash prefix 0x1220 and base58-encodes. +bytes32_to_ipfs() { + local hex="${1#0x}" + python3 -c "import base58; print(base58.b58encode(bytes.fromhex('1220${hex}')).decode())" +} + +# Convert IPFS CIDv0 (Qm...) to bytes32 (0x-prefixed, 64 hex chars). +# Strips the multihash prefix 0x1220. 
# Convert IPFS CIDv0 (Qm...) to bytes32 (0x-prefixed, 64 hex chars).
# Strips the multihash prefix 0x1220.
# Args: $1 = CIDv0 string
ipfs_to_bytes32() {
  # Pass the CID via argv rather than interpolating it into the Python
  # source: immune to quoting/injection problems if $1 ever holds quotes.
  python3 -c "import base58, sys; print('0x' + base58.b58decode(sys.argv[1]).hex()[4:])" "$1"
}

# POST a GraphQL query and print the raw JSON response.
# Args: $1 = endpoint URL, $2 = query (callers pre-escape embedded quotes)
gql() {
  local url="$1"
  local query="$2"
  curl -s --max-time 10 "$url" \
    -H 'content-type: application/json' \
    -d "{\"query\": \"$query\"}" 2>/dev/null
}

# Evaluate a jq expression against a JSON string; status-only (jq -e).
# Args: $1 = JSON string, $2 = jq expression
jq_test() {
  local json="$1"
  local expr="$2"
  jq -e "$expr" <<<"$json" > /dev/null 2>&1
}

# Encode a valid SignedRCA payload using cast abi-encode.
# Args: $1 = deployment bytes32 (0x-prefixed, 32 bytes)
# Uses hardcoded test values for other fields; the 0xaabbccdd trailing bytes
# stand in for the signature (the rejection scenarios rely on it being fake).
# Outputs: hex-encoded payload (0x-prefixed)
encode_rca() {
  local deployment_bytes32="$1"

  local terms
  terms=$(cast abi-encode \
    "f((uint256,uint256))" \
    "(1000,50)")

  local metadata
  metadata=$(cast abi-encode \
    "f((bytes32,uint8,bytes))" \
    "($deployment_bytes32,1,$terms)")

  local signed_rca
  signed_rca=$(cast abi-encode \
    "f(((uint64,uint64,address,address,address,uint256,uint256,uint32,uint32,uint256,bytes),bytes))" \
    "((1900000000,2000000000,${ACCOUNT0_ADDRESS},${RECEIVER_ADDRESS},${RECEIVER_ADDRESS},10000,100,3600,86400,42,$metadata),0xaabbccdd)")

  printf '%s\n' "$signed_rca"
}

# Insert a proposal row into pending_rca_proposals.
# Args: $1 = uuid, $2 = hex payload (0x-prefixed)
insert_proposal() {
  local uuid="$1"
  local payload_hex="$2"
  # Strip 0x prefix for postgres bytea hex format
  local payload_pg="\\\\x${payload_hex#0x}"

  $PGCMD -c "INSERT INTO pending_rca_proposals (id, signed_payload, version, status, created_at, updated_at)
             VALUES ('$uuid', E'$payload_pg', 2, 'pending', NOW(), NOW());"
}

# Poll management API for a DIPS indexing rule matching a deployment.
# Args: $1 = deployment IPFS hash, $2 = timeout in seconds
# Returns: 0 if found, 1 if timeout
poll_dips_rule() {
  local wanted="$1"
  local max_wait="${2:-120}"
  local step=5
  local waited=0

  until [ "$waited" -ge "$max_wait" ]; do
    local response
    response=$(gql "$AGENT_URL" \
      "{ indexingRules(merged: false) { identifier decisionBasis } }")

    # Found as soon as a DIPS-basis rule exists for this deployment.
    if jq -e \
      ".data.indexingRules[] | select(.identifier == \"$wanted\" and .decisionBasis == \"dips\")" \
      <<<"$response" > /dev/null 2>&1; then
      return 0
    fi

    sleep "$step"
    waited=$((waited + step))
  done
  return 1
}

# Check a proposal's status in the database.
# Args: $1 = uuid, $2 = expected status
check_proposal_status() {
  local row_id="$1"
  local want="$2"
  local got
  got=$($PGCMD -c "SELECT status FROM pending_rca_proposals WHERE id = '$row_id';")
  [ "$got" = "$want" ]
}

# Count DIPS rules for a given deployment.
# Args: $1 = deployment IPFS hash
count_dips_rules() {
  local wanted="$1"
  local response
  response=$(gql "$AGENT_URL" \
    "{ indexingRules(merged: false) { identifier decisionBasis } }")
  jq \
    "[.data.indexingRules[] | select(.identifier == \"$wanted\" and .decisionBasis == \"dips\")] | length" \
    <<<"$response"
}

# Clean up: delete a proposal from the database and remove its DIPS rule.
# Args: $1 = uuid, $2 = deployment IPFS hash (optional)
cleanup_proposal() {
  local uuid="$1"
  local deployment_hash="${2:-}"

  # Best-effort: missing rows/rules are fine during cleanup.
  $PGCMD -c "DELETE FROM pending_rca_proposals WHERE id = '$uuid';" || true

  if [ -n "$deployment_hash" ]; then
    gql "$AGENT_URL" "mutation { deleteIndexingRule(identifier: { identifier: \\\"$deployment_hash\\\", protocolNetwork: \\\"hardhat\\\" }) }" > /dev/null 2>&1 || true
  fi
}

# ── On-chain helpers (PLAN_03 scenarios) ──────────────────────────────

HARDHAT_RPC="http://${CHAIN_HOST:-localhost}:${CHAIN_RPC_PORT:-8545}"
NETWORK_SUBGRAPH_URL="http://${GRAPH_NODE_HOST:-localhost}:${GRAPH_NODE_GRAPHQL_PORT:-8000}/subgraphs/name/graph-network"

# Read contract addresses from the agent's config (docker volume) to avoid stale hardcoded values.
# These are regenerated on each local-network deploy, so hardcoding them breaks.
# Args: $1 = config file basename (without .json), $2 = contract key
# Outputs: the contract's "1337" address, or nothing on failure
_config_addr() {
  docker exec indexer-agent python3 -c \
    "import json; print(json.load(open('/opt/config/$1.json'))['1337']['$2']['address'])" \
    2>/dev/null
}

# Each variable can still be overridden from the caller's environment.
SUBGRAPH_SERVICE_ADDRESS="${SUBGRAPH_SERVICE_ADDRESS:-$(_config_addr subgraph-service SubgraphService)}"
COLLECTOR_ADDRESS="${COLLECTOR_ADDRESS:-$(_config_addr horizon GraphTallyCollector)}"
RECURRING_COLLECTOR_ADDRESS="${RECURRING_COLLECTOR_ADDRESS:-$(_config_addr horizon RecurringCollector)}"
GRT_TOKEN="${GRT_TOKEN:-$(_config_addr horizon L2GraphToken)}"
PAYMENTS_ESCROW="${PAYMENTS_ESCROW:-$(_config_addr horizon PaymentsEscrow)}"

# Encode a PROPERLY SIGNED RCA payload using cast EIP-712 signing.
+# Args: $1 = deployment bytes32, $2 = deadline (optional), $3 = endsAt (optional) +# Outputs: hex-encoded signed payload (0x-prefixed) +encode_signed_rca() { + local deployment_bytes32="$1" + local deadline="${2:-$(( $(date +%s) + 7200 ))}" + local ends_at="${3:-$(( $(date +%s) + 172800 ))}" + local nonce + nonce=$(date +%s%N) # nanosecond timestamp as nonce + + # 1. ABI-encode metadata (same pattern as encode_rca) + local terms + terms=$(cast abi-encode "f((uint256,uint256))" "(50,10)") + local metadata + metadata=$(cast abi-encode "f((bytes32,uint8,bytes))" "($deployment_bytes32,0,$terms)") + + # 2. Query EIP-712 domain from the collector contract (EIP-5267) + local domain_result + domain_result=$(cast call --rpc-url "$HARDHAT_RPC" \ + "$RECURRING_COLLECTOR_ADDRESS" \ + "eip712Domain()(bytes1,string,string,uint256,address,bytes32,uint256[])" 2>/dev/null) || true + + local domain_name domain_version domain_chain_id domain_contract + if [ -n "$domain_result" ]; then + # Strip surrounding quotes from cast call string output + domain_name=$(echo "$domain_result" | sed -n '2p' | tr -d '"') + domain_version=$(echo "$domain_result" | sed -n '3p' | tr -d '"') + domain_chain_id=$(echo "$domain_result" | sed -n '4p') + domain_contract=$(echo "$domain_result" | sed -n '5p') + else + domain_name="GraphTallyCollector" + domain_version="1" + domain_chain_id=1337 + domain_contract="$RECURRING_COLLECTOR_ADDRESS" + fi + + # 3. Build EIP-712 typed data JSON + local tmpfile + tmpfile=$(mktemp /tmp/rca-typed-data-XXXXXX.json) + cat > "$tmpfile" < 0' > /dev/null 2>&1 +} + +# Ensure the payer (ACCOUNT0) has tokens deposited in PaymentsEscrow for the indexer. +# Idempotent: checks balance first, deposits only if needed. 
ensure_payer_escrow() {
  local amount="1000000000000000000000" # 1000 GRT (18 decimals)

  local balance
  balance=$(cast call --rpc-url "$HARDHAT_RPC" \
    "$PAYMENTS_ESCROW" "getBalance(address,address,address)(uint256)" \
    "$ACCOUNT0_ADDRESS" "$RECEIVER_ADDRESS" "$RECURRING_COLLECTOR_ADDRESS" 2>/dev/null || echo "0")

  # Any non-zero, non-empty balance counts as "already funded"; the value is
  # not compared against $amount, so a partially-funded escrow also passes.
  # NOTE(review): getBalance is called as (payer, receiver, collector) and
  # deposit below as (receiver, collector, amount) — confirm this argument
  # order against the PaymentsEscrow ABI.
  if [ "$balance" != "0" ] && [ -n "$balance" ]; then
    echo " OK Payer escrow already funded ($balance)"
    return 0
  fi

  echo " ... Funding payer escrow (approve + deposit)..."
  cast send --rpc-url "$HARDHAT_RPC" --private-key "$ACCOUNT0_SECRET" \
    "$GRT_TOKEN" "approve(address,uint256)" "$PAYMENTS_ESCROW" "$amount" \
    --confirmations 1 > /dev/null 2>&1
  cast send --rpc-url "$HARDHAT_RPC" --private-key "$ACCOUNT0_SECRET" \
    "$PAYMENTS_ESCROW" "deposit(address,address,uint256)" "$RECEIVER_ADDRESS" "$RECURRING_COLLECTOR_ADDRESS" "$amount" \
    --confirmations 1 > /dev/null 2>&1
  echo " OK Payer escrow funded"
}

# Ensure the payer (ACCOUNT0) has authorized itself as a signer on the RecurringCollector.
# Required for EIP-712 signed RCA acceptance. Idempotent.
ensure_signer_authorized() {
  local is_auth
  is_auth=$(cast call --rpc-url "$HARDHAT_RPC" \
    "$RECURRING_COLLECTOR_ADDRESS" "isAuthorized(address,address)(bool)" \
    "$ACCOUNT0_ADDRESS" "$ACCOUNT0_ADDRESS" 2>/dev/null || echo "false")

  if [ "$is_auth" = "true" ]; then
    echo " OK Signer already authorized on RecurringCollector"
    return 0
  fi

  echo " ... Authorizing signer on RecurringCollector..."

  local chain_id
  chain_id=$(cast chain-id --rpc-url "$HARDHAT_RPC")

  # Proof is valid for 24h — more than enough for one test run.
  local deadline=$(( $(date +%s) + 86400 ))

  # Replicate ethers.solidityPacked(['uint256','address','string','uint256','address'], [...])
  local packed
  packed=$(cast abi-encode --packed \
    "f(uint256,address,string,uint256,address)" \
    "$chain_id" "$RECURRING_COLLECTOR_ADDRESS" "authorizeSignerProof" "$deadline" "$ACCOUNT0_ADDRESS")

  local hash
  hash=$(cast keccak "$packed")

  # Replicate wallet.signMessage(getBytes(hash)) — cast wallet sign applies Ethereum prefix by default
  local proof
  proof=$(cast wallet sign --private-key "$ACCOUNT0_SECRET" "$hash")

  cast send --rpc-url "$HARDHAT_RPC" --private-key "$ACCOUNT0_SECRET" \
    "$RECURRING_COLLECTOR_ADDRESS" "authorizeSigner(address,uint256,bytes)" \
    "$ACCOUNT0_ADDRESS" "$deadline" "$proof" \
    --confirmations 1 > /dev/null 2>&1
  echo " OK Signer authorized on RecurringCollector"
}

# ── Prerequisites ─────────────────────────────────────────────────────

echo "=== DIPs Integration Tests ==="
echo " Agent: $AGENT_URL"
echo " Postgres: $PG_HOST:$PG_PORT/$PG_DB"
echo ""

echo "--- Prerequisites ---"

# Each prerequisite is fatal: every scenario below depends on all of them.
check "psql reachable" \
  "$PGCMD -c 'SELECT 1;'" || { echo "FATAL: Cannot reach postgres"; exit 1; }

check "cast available" \
  "command -v cast" || { echo "FATAL: cast (foundry) not found"; exit 1; }

check "jq available" \
  "command -v jq" || { echo "FATAL: jq not found"; exit 1; }

check "python3 base58 available" \
  "python3 -c 'import base58'" || { echo "FATAL: python3 base58 not found (pip install base58)"; exit 1; }

check "agent healthy" \
  "gql '$AGENT_URL' '{ indexingRules(merged: false) { identifier } }' | jq -e '.data'" \
  || { echo "FATAL: Agent not responding"; exit 1; }

check "pending_rca_proposals table exists" \
  "$PGCMD -c \"SELECT 1 FROM pending_rca_proposals LIMIT 0;\"" \
  || { echo "FATAL: Table pending_rca_proposals does not exist. Run migration 23."; exit 1; }

echo ""

# ── Scenario helpers ──────────────────────────────────────────────────

# A deployment bytes32 that is NOT currently being indexed.
# This is a deterministic fake — no real subgraph, but that's fine for rule creation tests.
NEW_DEPLOYMENT_BYTES32="0x0100000000000000000000000000000000000000000000000000000000000001"
# Convert to IPFS hash for management API queries.
# For a real test with add-subgraph.sh, replace this with the actual deployment hash.
# For now we compute it: base58(0x1220 + bytes32_without_prefix)
NEW_DEPLOYMENT_IPFS=$(bytes32_to_ipfs "$NEW_DEPLOYMENT_BYTES32")

# An existing deployment that already has a rule (from start-indexing).
EXISTING_DEPLOYMENT_IPFS=$(gql "$AGENT_URL" \
  "{ indexingRules(merged: false) { identifier identifierType decisionBasis } }" \
  | jq -r '.data.indexingRules[] | select(.identifierType == "deployment" and .decisionBasis == "always") | .identifier' \
  | head -1)

echo " New deployment (bytes32): $NEW_DEPLOYMENT_BYTES32"
echo " New deployment (IPFS): $NEW_DEPLOYMENT_IPFS"
echo " Existing deployment: $EXISTING_DEPLOYMENT_IPFS"
echo ""

# ── Scenarios ─────────────────────────────────────────────────────────
#
# Scenarios 1-5, 7, 9 are batched: all proposals are inserted at once,
# then we wait for a single agent cycle to process them all. This avoids
# waiting for a separate loop cycle per scenario.
#
# Scenarios 6 and 8 run standalone (restart and valid on-chain accept).
+ +run_rejection_batch() { + echo "=== Batch: Rejection scenarios (1, 2, 3, 4, 5, 7, 9) ===" + echo "" + + # ── Scenario variables ── + + # Scenario 1: new deployment, fake sig + local s1_uuid="00000001-0001-0001-0001-000000000001" + + # Scenario 2: existing deployment, fake sig + local s2_uuid="00000002-0002-0002-0002-000000000002" + local s2_bytes32="" + if [ -n "$EXISTING_DEPLOYMENT_IPFS" ]; then + s2_bytes32=$(ipfs_to_bytes32 "$EXISTING_DEPLOYMENT_IPFS") + fi + + # Scenario 3: good + corrupt payload + local s3_good_uuid="00000003-0003-0003-0003-000000000001" + local s3_bad_uuid="00000003-0003-0003-0003-000000000002" + local s3_deployment="0x0200000000000000000000000000000000000000000000000000000000000003" + local s3_ipfs + s3_ipfs=$(bytes32_to_ipfs "$s3_deployment") + + # Scenario 4: blocklisted deployment + local s4_uuid="00000004-0004-0004-0004-000000000004" + local s4_deployment="0x0300000000000000000000000000000000000000000000000000000000000004" + local s4_ipfs + s4_ipfs=$(bytes32_to_ipfs "$s4_deployment") + + # Scenario 5: duplicate proposals + local s5_uuid1="00000005-0005-0005-0005-000000000001" + local s5_uuid2="00000005-0005-0005-0005-000000000002" + local s5_deployment="0x0400000000000000000000000000000000000000000000000000000000000005" + local s5_ipfs + s5_ipfs=$(bytes32_to_ipfs "$s5_deployment") + + # Scenario 7: expired deadline + local s7_uuid="00000007-0007-0007-0007-000000000007" + local s7_deployment="0x0600000000000000000000000000000000000000000000000000000000000007" + local s7_ipfs + s7_ipfs=$(bytes32_to_ipfs "$s7_deployment") + + # Scenario 9: invalid sig revert + local s9_uuid="00000009-0009-0009-0009-000000000009" + local s9_deployment="0x0700000000000000000000000000000000000000000000000000000000000009" + local s9_ipfs + s9_ipfs=$(bytes32_to_ipfs "$s9_deployment") + + # ── Cleanup ── + + cleanup_proposal "$s1_uuid" "$NEW_DEPLOYMENT_IPFS" + cleanup_proposal "$s2_uuid" + cleanup_proposal "$s3_good_uuid" "$s3_ipfs" + cleanup_proposal 
"$s3_bad_uuid" + cleanup_proposal "$s4_uuid" "$s4_ipfs" + cleanup_proposal "$s5_uuid1" "$s5_ipfs" + cleanup_proposal "$s5_uuid2" + cleanup_proposal "$s7_uuid" "$s7_ipfs" + cleanup_proposal "$s9_uuid" "$s9_ipfs" + + # ── Setup & Insert ── + + echo " Inserting all proposals..." + + # S1: new deployment + insert_proposal "$s1_uuid" "$(encode_rca "$NEW_DEPLOYMENT_BYTES32")" + + # S2: existing deployment + if [ -n "$s2_bytes32" ]; then + insert_proposal "$s2_uuid" "$(encode_rca "$s2_bytes32")" + fi + + # S3: good + corrupt + insert_proposal "$s3_good_uuid" "$(encode_rca "$s3_deployment")" + $PGCMD -c "INSERT INTO pending_rca_proposals (id, signed_payload, version, status, created_at, updated_at) + VALUES ('$s3_bad_uuid', E'\\\\xdeadbeef', 2, 'pending', NOW(), NOW());" + + # S4: blocklisted (set NEVER rule first) + gql "$AGENT_URL" "mutation { setIndexingRule(rule: { identifier: \\\"$s4_ipfs\\\", identifierType: deployment, decisionBasis: never, protocolNetwork: \\\"hardhat\\\" }) { identifier } }" > /dev/null + insert_proposal "$s4_uuid" "$(encode_rca "$s4_deployment")" + + # S5: duplicate proposals + local s5_payload + s5_payload=$(encode_rca "$s5_deployment") + insert_proposal "$s5_uuid1" "$s5_payload" + insert_proposal "$s5_uuid2" "$s5_payload" + + # S7: expired deadline + local s7_expired=$(( $(date +%s) - 100 )) + local s7_terms s7_metadata + s7_terms=$(cast abi-encode "f((uint256,uint256))" "(1000,50)") + s7_metadata=$(cast abi-encode "f((bytes32,uint8,bytes))" "($s7_deployment,1,$s7_terms)") + insert_proposal "$s7_uuid" "$(cast abi-encode \ + "f(((uint64,uint64,address,address,address,uint256,uint256,uint32,uint32,uint256,bytes),bytes))" \ + "(($s7_expired,2000000000,${ACCOUNT0_ADDRESS},${RECEIVER_ADDRESS},${RECEIVER_ADDRESS},10000,100,3600,86400,42,$s7_metadata),0xaabbccdd)")" + + # S9: fake sig, future deadline + local s9_deadline=$(( $(date +%s) + 7200 )) + local s9_terms s9_metadata + s9_terms=$(cast abi-encode "f((uint256,uint256))" "(1000,50)") + 
s9_metadata=$(cast abi-encode "f((bytes32,uint8,bytes))" "($s9_deployment,1,$s9_terms)") + insert_proposal "$s9_uuid" "$(cast abi-encode \ + "f(((uint64,uint64,address,address,address,uint256,uint256,uint32,uint32,uint256,bytes),bytes))" \ + "(($s9_deadline,2000000000,${ACCOUNT0_ADDRESS},${RECEIVER_ADDRESS},${RECEIVER_ADDRESS},10000,100,3600,86400,42,$s9_metadata),0xaabbccdd)")" + + echo " All proposals inserted, waiting for agent cycle..." + + # ── Wait for agent to process ── + # Poll scenario 1 as sentinel — once it's rejected, the cycle has run. + poll_proposal_status "$s1_uuid" "rejected" 120 + + # ── Check results ── + echo "" + + echo "--- Scenario 1: New deployment — RCA processed by agent ---" + check "1.1 Proposal rejected (fake signature)" \ + "check_proposal_status '$s1_uuid' 'rejected'" || true + + echo "--- Scenario 2: Existing deployment — proposal processed ---" + if [ -n "$s2_bytes32" ]; then + check "2.1 Proposal rejected (fake signature, existing allocation)" \ + "check_proposal_status '$s2_uuid' 'rejected'" || true + else + echo " SKIP No existing deployment found" + fi + + echo "--- Scenario 3: Corrupt payload — agent skips bad rows ---" + check "3.1 Valid proposal rejected (fake signature)" \ + "check_proposal_status '$s3_good_uuid' 'rejected'" || true + check "3.2 Corrupt proposal still pending (not crashed)" \ + "check_proposal_status '$s3_bad_uuid' 'pending'" || true + + echo "--- Scenario 4: Blocklisted deployment — proposal rejected ---" + check "4.1 Proposal rejected for blocklisted deployment" \ + "check_proposal_status '$s4_uuid' 'rejected'" || true + + echo "--- Scenario 5: Duplicate proposals — both processed ---" + check "5.1 First proposal rejected" \ + "check_proposal_status '$s5_uuid1' 'rejected'" || true + check "5.2 Second proposal rejected" \ + "check_proposal_status '$s5_uuid2' 'rejected'" || true + + echo "--- Scenario 7: Expired deadline — proposal rejected ---" + check "7.1 Proposal rejected with expired deadline" \ + 
"check_proposal_status '$s7_uuid' 'rejected'" || true + local s7_dips_count + s7_dips_count=$(count_dips_rules "$s7_ipfs") + check "7.2 No DIPS rule left for expired proposal" \ + "[ '$s7_dips_count' -eq 0 ]" || true + + echo "--- Scenario 9: Invalid signature — contract revert → rejected ---" + check "9.1 Proposal rejected after contract revert" \ + "check_proposal_status '$s9_uuid' 'rejected'" || true + local s9_dips_count + s9_dips_count=$(count_dips_rules "$s9_ipfs") + check "9.2 DIPS rule cleaned up after rejection" \ + "[ '$s9_dips_count' -eq 0 ]" || true + + # ── Cleanup all ── + echo "" + echo " Cleaning up..." + cleanup_proposal "$s1_uuid" "$NEW_DEPLOYMENT_IPFS" + cleanup_proposal "$s2_uuid" + cleanup_proposal "$s3_good_uuid" "$s3_ipfs" + cleanup_proposal "$s3_bad_uuid" + cleanup_proposal "$s4_uuid" "$s4_ipfs" + cleanup_proposal "$s5_uuid1" "$s5_ipfs" + cleanup_proposal "$s5_uuid2" + cleanup_proposal "$s7_uuid" "$s7_ipfs" + cleanup_proposal "$s9_uuid" "$s9_ipfs" + echo "" +} + +scenario_6_agent_restart() { + echo "=== Scenario 6: Agent restart — proposals survive and get processed ===" + local uuid="00000006-0006-0006-0006-000000000006" + local deployment="0x0500000000000000000000000000000000000000000000000000000000000006" + local ipfs + ipfs=$(bytes32_to_ipfs "$deployment") + + cleanup_proposal "$uuid" "$ipfs" + + insert_proposal "$uuid" "$(encode_rca "$deployment")" + + echo " Inserted proposal, reloading agent..." 
  # Touching a source file nudges the dev-mode hot-reload watcher into
  # restarting the agent (see the fallback message); presumably only effective
  # with the dips dev override mounted — confirm against compose/dev/dips.yaml.
  docker exec indexer-agent touch /opt/indexer-agent-source-root/packages/indexer-agent/src/index.ts 2>/dev/null \
    || echo " (Could not trigger reload via touch)"

  # Give the restart time to begin, then wait (up to ~60 s more) until the
  # agent's GraphQL endpoint answers again before asserting anything.
  sleep 15
  local elapsed=0
  while [ "$elapsed" -lt 60 ]; do
    if gql "$AGENT_URL" "{ indexingRules(merged: false) { identifier } }" | jq -e '.data' > /dev/null 2>&1; then
      break
    fi
    sleep 5
    elapsed=$((elapsed + 5))
  done

  check "6.1 Proposal processed after restart" \
    "poll_proposal_status '$uuid' 'rejected' 120" || true

  cleanup_proposal "$uuid" "$ipfs"
  echo ""
}

# Scenario 8: the happy path — a properly signed RCA is accepted on-chain.
scenario_8_onchain_accept() {
  echo "=== Scenario 8: Valid on-chain accept — proposal accepted ==="

  # Use a unique deployment per run to avoid AllocationAlreadyHasIndexingAgreement.
  # The agent creates a fresh allocation via multicall (startService + acceptIndexingAgreement).
  local ts
  ts=$(date +%s)
  local deployment_bytes32
  deployment_bytes32=$(printf "0x08%062x" "$ts")  # 0x08 prefix + 62 hex digits = 32 bytes
  local deployment_ipfs
  deployment_ipfs=$(bytes32_to_ipfs "$deployment_bytes32")

  local uuid="00000008-0008-0008-0008-000000000008"

  cleanup_proposal "$uuid" "$deployment_ipfs"
  ensure_payer_escrow
  ensure_signer_authorized

  local deadline=$(( ts + 7200 ))
  local ends_at=$(( ts + 172800 ))
  local payload
  payload=$(encode_signed_rca "$deployment_bytes32" "$deadline" "$ends_at")

  # NOTE(review): both branches of this test check emptiness — the second
  # `[ "$payload" = "" ]` is redundant with `-z`.
  if [ -z "$payload" ] || [ "$payload" = "" ]; then
    echo " SKIP Failed to encode signed RCA"
    return
  fi

  insert_proposal "$uuid" "$payload"
  echo " Inserted signed proposal for $deployment_ipfs (new allocation via multicall), waiting..."
+ + check "8.1 Proposal accepted on-chain" \ + "poll_proposal_status '$uuid' 'accepted' 120" || true + + cleanup_proposal "$uuid" "$deployment_ipfs" + echo "" +} + +# ── Run ─────────────────────────────────────────────────────────────── + +run_rejection_batch +scenario_6_agent_restart +scenario_8_onchain_accept + +# ── Summary ─────────────────────────────────────────────────────────── + +echo "=== Results ===" +echo " $pass passed, $fail failed, $total total" + +if [ "$fail" -eq 0 ]; then + echo " All DIPs integration tests passed." + exit 0 +else + echo " Some tests failed." + exit 1 +fi