Feature/devel instructions (#19)
* improve developer docs * fix readme Signed-off-by: mihaisc <mihai.scarlat@smartcontrol.ro> * update gitignore Signed-off-by: mihaisc <mihai.scarlat@smartcontrol.ro> * .env, and infura key Signed-off-by: mihaisc <mihai.scarlat@smartcontrol.ro> Co-authored-by: mihaisc <mihai.scarlat@smartcontrol.ro>
This commit is contained in:
parent
6b304e0386
commit
9b892f1cac
|
@ -13,6 +13,9 @@ pids
|
|||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
/docker/data
|
||||
/src/types
|
||||
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
lib-cov
|
||||
|
@ -33,7 +36,7 @@ bower_components
|
|||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
build/Release
|
||||
build
|
||||
|
||||
# Dependency directories
|
||||
node_modules/
|
||||
|
|
46
README.md
46
README.md
|
@ -75,39 +75,25 @@ This subgraph is deployed for all networks the Ocean Protocol contracts are depl
|
|||
|
||||
## 🦑 Development
|
||||
|
||||
Prepare the docker setup:
|
||||
```bash
|
||||
npm i
|
||||
```
|
||||
|
||||
- Install/run the Graph: `https://thegraph.com/docs/quick-start`
|
||||
|
||||
- You can skip running ganache-cli and connect directly to `mainnet` using Infura
|
||||
|
||||
```bash
|
||||
git clone https://github.com/graphprotocol/graph-node/
cd graph-node/docker
./setup.sh
|
||||
# Update this line in the `docker-compose.yml` file with your Infura ProjectId
|
||||
# ethereum: 'mainnet:https://mainnet.infura.io/v3/INFURA_PROJECT_ID'
|
||||
```
|
||||
Edit docker-compose and add your infura key & network
|
||||
|
||||
Start :
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
Note: making contract calls using Infura fails with `missing trie node` errors. The fix requires editing `ethereum_adapter.rs` line 434 to use the latest block instead of a specific block number. Replace: `web3.eth().call(req, Some(block_id)).then(|result| {` with `web3.eth().call(req, Some(BlockNumber::Latest.into())).then(|result| {`
|
||||
|
||||
To run the graph-node with this fix it must be run from source.
|
||||
|
||||
First, delete the `graph-node` container from the `docker-compose.yml` file
|
||||
then run `docker-compose up` to get the postgresql and ipfs services running.
|
||||
|
||||
Now you can build and run the graph-node from source
|
||||
|
||||
```bash
cargo run -p graph-node --release > graphnode.log -- \
  --postgres-url postgres://graph-node:let-me-in@localhost:5432/graph-node \
  --ethereum-rpc mainnet:https://mainnet.infura.io/v3/INFURA_PROJECT_ID \
  --ipfs 127.0.0.1:5001
```

To use with an Infura key, create a `.env` file (see `.env.example`):

```bash
docker-compose --env-file .env up
```
|
||||
|
||||
Switch to a new terminal:
|
||||
|
||||
|
||||
- Once the graph node is ready, do the following to deploy the ocean-subgraph to the local graph-node
|
||||
|
||||
```bash
|
||||
|
@ -115,15 +101,15 @@ git clone https://github.com/oceanprotocol/ocean-subgraph/
|
|||
cd ocean-subgraph
|
||||
npm i
|
||||
npm run codegen
|
||||
npm run create:local
|
||||
npm run deploy:local
|
||||
npm run create:local-rinkeby
|
||||
npm run deploy:local-rinkeby
|
||||
```
|
||||
|
||||
- You can edit the event handler code and then run `npm run deploy:local`
|
||||
- Running deploy will fail if the code has no changes
|
||||
- Sometimes deploy will fail no matter what, in this case:
|
||||
- Stop the docker-compose run (`docker-compose down`)
|
||||
- Delete the `ipfs` and `postgres` folders in `graph-node/docker/data`
|
||||
- Delete the `ipfs` and `postgres` folders in `docker/data`
|
||||
- Restart docker-compose
|
||||
- Run `npm run create:local` to create the ocean-subgraph
|
||||
- Run `npm run deploy:local` to deploy the ocean-subgraph
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
INFURA_PROJECT_ID="xxx"
|
|
@ -0,0 +1,80 @@
|
|||
# Graph Node Docker Image
|
||||
|
||||
Preconfigured Docker image for running a Graph Node.
|
||||
|
||||
## Usage
|
||||
|
||||
```sh
|
||||
docker run -it \
|
||||
-e postgres_host=<HOST> \
|
||||
-e postgres_port=<PORT> \
|
||||
-e postgres_user=<USER> \
|
||||
-e postgres_pass=<PASSWORD> \
|
||||
-e postgres_db=<DBNAME> \
|
||||
-e ipfs=<HOST>:<PORT> \
|
||||
-e ethereum=<NETWORK_NAME>:<ETHEREUM_RPC_URL> \
|
||||
graphprotocol/graph-node:latest
|
||||
```
|
||||
|
||||
### Example usage
|
||||
|
||||
```sh
|
||||
docker run -it \
|
||||
-e postgres_host=host.docker.internal \
|
||||
-e postgres_port=5432 \
|
||||
-e postgres_user=graph-node \
|
||||
-e postgres_pass=oh-hello \
|
||||
-e postgres_db=graph-node \
|
||||
-e ipfs=host.docker.internal:5001 \
|
||||
-e ethereum=mainnet:http://localhost:8545/ \
|
||||
graphprotocol/graph-node:latest
|
||||
```
|
||||
|
||||
## Docker Compose
|
||||
|
||||
The Docker Compose setup requires an Ethereum network name and node
|
||||
to connect to. By default, it will use `mainnet:http://host.docker.internal:8545`
|
||||
in order to connect to an Ethereum node running on your host machine.
|
||||
You can replace this with anything else in `docker-compose.yaml`.
|
||||
|
||||
> **Note for Linux users:** On Linux, `host.docker.internal` is not
|
||||
> currently supported. Instead, you will have to replace it with the
|
||||
> IP address of your Docker host (from the perspective of the Graph
|
||||
> Node container).
|
||||
> To do this, run:
|
||||
>
|
||||
> ```
|
||||
> CONTAINER_ID=$(docker container ls | grep graph-node | cut -d' ' -f1)
|
||||
> docker exec $CONTAINER_ID /bin/bash -c 'apt install -y iproute2 && ip route' | awk '/^default via /{print $3}'
|
||||
> ```
|
||||
>
|
||||
> This will print the host's IP address. Then, put it into `docker-compose.yml`:
|
||||
>
|
||||
> ```
|
||||
> sed -i -e 's/host.docker.internal/<IP ADDRESS>/g' docker-compose.yml
|
||||
> ```
|
||||
|
||||
After you have set up an Ethereum node—e.g. Ganache or Parity—simply
|
||||
clone this repository and run
|
||||
|
||||
```sh
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
This will start IPFS, Postgres and Graph Node in Docker and create persistent
|
||||
data directories for IPFS and Postgres in `./data/ipfs` and `./data/postgres`. You
|
||||
can access these via:
|
||||
|
||||
- Graph Node:
|
||||
- GraphiQL: `http://localhost:8000/`
|
||||
- HTTP: `http://localhost:8000/subgraphs/name/<subgraph-name>`
|
||||
- WebSockets: `ws://localhost:8001/subgraphs/name/<subgraph-name>`
|
||||
- Admin: `http://localhost:8020/`
|
||||
- IPFS:
|
||||
- `127.0.0.1:5001` or `/ip4/127.0.0.1/tcp/5001`
|
||||
- Postgres:
|
||||
- `postgresql://graph-node:let-me-in@localhost:5432/graph-node`
|
||||
|
||||
Once this is up and running, you can use
|
||||
[`graph-cli`](https://github.com/graphprotocol/graph-cli) to create and
|
||||
deploy your subgraph to the running Graph Node.
|
|
@ -0,0 +1,11 @@
|
|||
#! /bin/bash

# Create a named subgraph on the graph-node index node via its JSON-RPC
# admin API.
#
# usage: create <name>

# Require exactly one argument; -ne is the correct integer comparison for $#
# and diagnostics belong on stderr.
if [ "$#" -ne 1 ]; then
    echo "usage: create <name>" >&2
    exit 1
fi

# Admin endpoint of the index node (in-cluster DNS name).
api="http://index-node.default/"

# Build the JSON-RPC payload; printf interpolates the name safely into the JSON.
data=$(printf '{"jsonrpc": "2.0", "method": "subgraph_create", "params": {"name":"%s"}, "id":"1"}' "$1")
curl -s -H "content-type: application/json" --data "$data" "$api"
|
|
@ -0,0 +1,9 @@
|
|||
#! /bin/bash

# Open a graph-node core dump in rust-gdb for post-mortem debugging.
#
# usage: debug <core-file>

if [ -f "$1" ]
then
    # Replace this shell with rust-gdb attached to the core file and the
    # graph-node binary from the cargo bin directory.
    exec rust-gdb -c "$1" /usr/local/cargo/bin/graph-node
else
    # Diagnostics go to stderr, not stdout.
    echo "usage: debug <core-file>" >&2
    exit 1
fi
|
|
@ -0,0 +1,12 @@
|
|||
#! /bin/bash

# Deploy a subgraph (already uploaded to IPFS) to a named graph-node via the
# index node's JSON-RPC admin API.
#
# usage: deploy <name> <ipfs_hash> <node>

# Require exactly three arguments; -ne is the correct integer comparison
# and the usage message belongs on stderr.
if [ "$#" -ne 3 ]; then
    echo "usage: deploy <name> <ipfs_hash> <node>" >&2
    exit 1
fi

# Admin endpoint of the index node (in-cluster DNS name).
api="http://index-node.default/"

echo "Deploying $1 (deployment $2)"
# Build the JSON-RPC payload; printf interpolates all three values safely.
data=$(printf '{"jsonrpc": "2.0", "method": "subgraph_deploy", "params": {"name":"%s", "ipfs_hash":"%s", "node_id":"%s"}, "id":"1"}' "$1" "$2" "$3")
curl -s -H "content-type: application/json" --data "$data" "$api"
|
|
@ -0,0 +1,12 @@
|
|||
#! /bin/bash

# Reassign an existing subgraph deployment to a different graph-node via the
# index node's JSON-RPC admin API.
#
# usage: reassign <name> <ipfs_hash> <node>

# Require exactly three arguments (was -lt 3, which silently accepted and
# ignored extras — inconsistent with the sibling deploy script and the usage
# text). Usage goes to stderr.
if [ "$#" -ne 3 ]; then
    echo "usage: reassign <name> <ipfs_hash> <node>" >&2
    exit 1
fi

# Admin endpoint of the index node (in-cluster DNS name).
api="http://index-node.default/"

echo "Assigning to $3"
# Build the JSON-RPC payload; printf interpolates all three values safely.
data=$(printf '{"jsonrpc": "2.0", "method": "subgraph_reassign", "params": {"name":"%s", "ipfs_hash":"%s", "node_id":"%s"}, "id":"1"}' "$1" "$2" "$3")
curl -s -H "content-type: application/json" --data "$data" "$api"
|
|
@ -0,0 +1,11 @@
|
|||
#! /bin/bash

# Remove a named subgraph from the graph-node index node via its JSON-RPC
# admin API.
#
# usage: remove <name>

# Require exactly one argument. The usage text previously said
# "create <name>" — a copy/paste error from the create script — and went to
# stdout instead of stderr.
if [ "$#" -ne 1 ]; then
    echo "usage: remove <name>" >&2
    exit 1
fi

# Admin endpoint of the index node (in-cluster DNS name).
api="http://index-node.default/"

# Build the JSON-RPC payload; printf interpolates the name safely into the JSON.
data=$(printf '{"jsonrpc": "2.0", "method": "subgraph_remove", "params": {"name":"%s"}, "id":"1"}' "$1")
curl -s -H "content-type: application/json" --data "$data" "$api"
|
|
@ -0,0 +1,38 @@
|
|||
# Local graph-node stack: graph-node + its two backing services (IPFS and
# Postgres), indexing Rinkeby through Infura.
version: '3'
services:
  # Graph Node: indexes subgraphs and serves GraphQL queries.
  graph-node:
    image: oceanprotocol/graph-node:latest
    ports:
      - '8000:8000' # GraphQL HTTP (GraphiQL at http://localhost:8000/)
      - '8001:8001' # GraphQL WebSockets
      - '8020:8020' # JSON-RPC admin API (create/deploy)
      - '8030:8030' # NOTE(review): presumably the indexing-status API — confirm
      - '8040:8040' # NOTE(review): presumably metrics — confirm
    depends_on:
      - ipfs
      - postgres
    environment:
      # Must match the POSTGRES_* values of the postgres service below.
      postgres_host: postgres
      postgres_user: graph-node
      postgres_pass: let-me-in
      postgres_db: graph-node
      ipfs: 'ipfs:5001'
      # INFURA_PROJECT_ID comes from the environment; run with
      # `docker-compose --env-file .env up` (see .env.example).
      ethereum: 'rinkeby:https://rinkeby.infura.io/v3/${INFURA_PROJECT_ID}'
      RUST_LOG: info
  # IPFS node: stores subgraph manifests/files; data persisted to ./data/ipfs.
  ipfs:
    image: ipfs/go-ipfs:v0.4.23
    ports:
      - '5001:5001'
    volumes:
      - ./data/ipfs:/data/ipfs
  # Postgres: graph-node's datastore; data persisted to ./data/postgres.
  postgres:
    image: postgres
    ports:
      - '5432:5432'
    command: ['postgres', '-cshared_preload_libraries=pg_stat_statements']
    environment:
      POSTGRES_USER: graph-node
      POSTGRES_PASSWORD: let-me-in
      POSTGRES_DB: graph-node
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash

# Build hook: bake the branch being built into the Dockerfile by replacing the
# default SOURCE_BRANCH value.
# NOTE(review): assumes SOURCE_BRANCH is provided by the build environment
# (e.g. Docker Hub automated builds) — confirm.

set -e
set -x

# Fail early if SOURCE_BRANCH is unset or empty; the original silently baked
# an empty string into the Dockerfile in that case.
: "${SOURCE_BRANCH:?SOURCE_BRANCH must be set}"

echo "Setting SOURCE_BRANCH to ${SOURCE_BRANCH}"

# NOTE(review): GNU sed syntax; BSD/macOS sed would need `sed -i '' ...`.
sed -i "s@^ENV SOURCE_BRANCH \"master\"@ENV SOURCE_BRANCH \"${SOURCE_BRANCH}\"@g" Dockerfile
|
|
@ -0,0 +1,42 @@
|
|||
#!/usr/bin/env bash

# One-time setup for running graph-node with docker-compose on hosts where
# `host.docker.internal` is unavailable (Linux): discover the Docker host IP
# as seen from the graph-node container and patch it into docker-compose.yml.

set -e

# Tool checks. `command -v` is the portable replacement for `which`, and the
# redirection is now `> /dev/null 2>&1` — the original `2>&1 > /dev/null`
# sends stderr to the terminal, not to /dev/null.
if ! command -v docker > /dev/null 2>&1; then
  echo "Please install 'docker' first" >&2
  exit 1
fi

if ! command -v docker-compose > /dev/null 2>&1; then
  echo "Please install 'docker-compose' first" >&2
  exit 1
fi

if ! command -v jq > /dev/null 2>&1; then
  echo "Please install 'jq' first" >&2
  exit 1
fi

# Create the graph-node container
docker-compose up --no-start graph-node

# Start graph-node so we can inspect it
docker-compose start graph-node

# Register the cleanup trap immediately after starting the container (the
# original registered it only at the very end, so any failure in between
# left the container running).
stop_graph_node() {
  # Ensure graph-node is stopped
  docker-compose stop graph-node
}
trap stop_graph_node EXIT

# Identify the container ID
CONTAINER_ID=$(docker container ls | grep graph-node | cut -d' ' -f1)

# Inspect the container to identify the host IP address
HOST_IP=$(docker inspect "$CONTAINER_ID" | jq -r .[0].NetworkSettings.Networks[].Gateway)

echo "Host IP: $HOST_IP"

# Inject the host IP into docker-compose.yml
# NOTE(review): GNU sed syntax; BSD/macOS sed would need `sed -i '' -e ...`.
sed -i -e "s/host.docker.internal/$HOST_IP/g" docker-compose.yml
|
|
@ -0,0 +1,111 @@
|
|||
#!/bin/bash

# Entrypoint helper for the graph-node Docker image: waits for the IPFS and
# Postgres dependencies, then runs graph-node in one of three roles
# (query-node, index-node, combined-node) selected via $node_role.
# All configuration arrives via environment variables (postgres_*, ipfs,
# ethereum, node_id, node_role, BLOCK_INGESTOR, ...) supplied by
# docker-compose / the orchestrator.

# On exit, archive any /core.* dumps into /var/lib/graph/cores together with
# the binary and basic host state, for post-mortem debugging.
save_coredumps() {
    graph_dir=/var/lib/graph
    datestamp=$(date +"%Y-%m-%dT%H:%M:%S")
    # have_cores=yes iff at least one /core.* file exists
    ls /core.* >& /dev/null && have_cores=yes || have_cores=no
    if [ -d "$graph_dir" -a "$have_cores" = yes ]
    then
        core_dir=$graph_dir/cores
        mkdir -p $core_dir
        # From here on, everything this function prints goes to the shared
        # messages log next to the saved cores.
        exec >> "$core_dir"/messages 2>&1
        echo "${HOSTNAME##*-} Saving core dump on ${HOSTNAME} at ${datestamp}"

        dst="$core_dir/$datestamp-${HOSTNAME}"
        mkdir "$dst"
        # Keep the exact binary alongside the core so the pair matches in gdb
        cp /usr/local/bin/graph-node "$dst"
        cp /proc/loadavg "$dst"
        [ -f /Dockerfile ] && cp /Dockerfile "$dst"
        tar czf "$dst/etc.tgz" /etc/
        dmesg -e > "$dst/dmesg"
        # Capture environment variables, but filter out passwords
        env | sort | sed -r -e 's/^(postgres_pass|ELASTICSEARCH_PASSWORD)=.*$/\1=REDACTED/' > "$dst/env"

        for f in /core.*
        do
            echo "${HOSTNAME##*-} Found core dump $f"
            mv "$f" "$dst"
        done
        echo "${HOSTNAME##*-} Saving done"
    fi
}

# Block until the IPFS endpoint given as a URL in $1 accepts TCP connections
# (120s limit via wait_for).
wait_for_ipfs() {
    # Take the IPFS URL in $1 apart and extract host and port. If no explicit
    # host is given, use 443 for https, and 80 otherwise
    if [[ "$1" =~ ^((https?)://)?([^:/]+)(:([0-9]+))? ]]
    then
        proto=${BASH_REMATCH[2]:-http}
        host=${BASH_REMATCH[3]}
        port=${BASH_REMATCH[5]}
        if [ -z "$port" ]
        then
            [ "$proto" = "https" ] && port=443 || port=80
        fi
        # NOTE(review): wait_for is not defined in this script — presumably
        # the companion wait-for script installed on $PATH; confirm.
        wait_for "$host:$port" -t 120
    else
        echo "invalid IPFS URL: $1"
        exit 1
    fi
}

# Serve GraphQL queries only; never ingest blocks.
start_query_node() {
    export DISABLE_BLOCK_INGESTOR=true
    graph-node \
        --postgres-url "$postgres_url" \
        --ethereum-rpc $ethereum \
        --ipfs "$ipfs"
}

# Index subgraphs; only the node whose id matches $BLOCK_INGESTOR ingests
# blocks, all others disable the ingestor.
start_index_node() {
    # Only the index node with the name set in BLOCK_INGESTOR should ingest
    # blocks
    if [[ ${node_id} != "${BLOCK_INGESTOR}" ]]; then
        export DISABLE_BLOCK_INGESTOR=true
    fi

    graph-node \
        --node-id "${node_id//-/_}" \
        --postgres-url "$postgres_url" \
        --ethereum-rpc $ethereum \
        --ipfs "$ipfs"
}

# Single node doing both querying and indexing (the default role).
start_combined_node() {
    graph-node \
        --postgres-url "$postgres_url" \
        --ethereum-rpc $ethereum \
        --ipfs "$ipfs"
}

# Assemble the Postgres connection URL from the individual env vars.
postgres_port=${postgres_port:-5432}
postgres_url="postgresql://$postgres_user:$postgres_pass@$postgres_host:$postgres_port/$postgres_db"

# Wait for both backing services before launching graph-node.
wait_for_ipfs "$ipfs"
wait_for "$postgres_host:$postgres_port" -t 120
sleep 5

trap save_coredumps EXIT

# Tag Postgres connections with the node id (or hostname) for observability.
export PGAPPNAME="${node_id-$HOSTNAME}"

# Set custom poll interval
if [ -n "$ethereum_polling_interval" ]; then
    export ETHEREUM_POLLING_INTERVAL=$ethereum_polling_interval
fi

# Dispatch on the requested role; defaults to combined-node when unset.
case "${node_role-combined-node}" in
    query-node)
        start_query_node
        ;;
    index-node)
        start_index_node
        ;;
    combined-node)
        start_combined_node
        ;;
    *)
        echo "Unknown mode for start-node: $1"
        echo "usage: start (combined-node|query-node|index-node)"
        exit 1
esac
|
|
@ -0,0 +1,83 @@
|
|||
#!/bin/sh

# POSIX compatible clone of wait-for-it.sh
# This copy is from https://github.com/eficode/wait-for/commits/master
# at commit 8d9b4446
#
# Local fixes:
#   - $cmdname is referenced in the usage text but was never defined
#     (usage printed an empty program name); define it from $0.
#   - initialize HOST/PORT so stray environment values cannot leak in;
#   - replace the obsolescent `[ ... -o ... ]` with two `[ ]` tests.

cmdname=${0##*/}

TIMEOUT=15
QUIET=0
HOST=
PORT=

# Print a message to stderr unless --quiet was given.
echoerr() {
  if [ "$QUIET" -ne 1 ]; then printf "%s\n" "$*" 1>&2; fi
}

# Print usage to stderr and exit with the code in $1.
usage() {
  exitcode="$1"
  cat << USAGE >&2
Usage:
  $cmdname host:port [-t timeout] [-- command args]
  -q | --quiet                        Do not output any status messages
  -t TIMEOUT | --timeout=timeout      Timeout in seconds, zero for no timeout
  -- COMMAND ARGS                     Execute command with args after the test finishes
USAGE
  exit "$exitcode"
}

# Poll $HOST:$PORT with nc once per second for up to $TIMEOUT seconds.
# On success, exec the remaining arguments (if any) or exit 0; on timeout,
# exit 1.
wait_for() {
  for i in $(seq "$TIMEOUT") ; do
    nc -z "$HOST" "$PORT" > /dev/null 2>&1

    result=$?
    if [ $result -eq 0 ] ; then
      if [ $# -gt 0 ] ; then
        exec "$@"
      fi
      exit 0
    fi
    sleep 1
  done
  echo "Operation timed out" >&2
  exit 1
}

# Parse arguments; the first host:port token sets HOST/PORT, everything after
# `--` is the command to exec once the port is reachable.
while [ $# -gt 0 ]
do
  case "$1" in
    *:* )
      HOST=$(printf "%s\n" "$1"| cut -d : -f 1)
      PORT=$(printf "%s\n" "$1"| cut -d : -f 2)
      shift 1
      ;;
    -q | --quiet)
      QUIET=1
      shift 1
      ;;
    -t)
      TIMEOUT="$2"
      if [ "$TIMEOUT" = "" ]; then break; fi
      shift 2
      ;;
    --timeout=*)
      TIMEOUT="${1#*=}"
      shift 1
      ;;
    --)
      shift
      break
      ;;
    --help)
      usage 0
      ;;
    *)
      echoerr "Unknown argument: $1"
      usage 1
      ;;
  esac
done

# `-o` inside test is obsolescent in POSIX; use two tests joined with ||.
if [ "$HOST" = "" ] || [ "$PORT" = "" ]; then
  echoerr "Error: you need to provide a host and port to test."
  usage 2
fi

wait_for "$@"
|
Loading…
Reference in New Issue