Problem: statsd config is no longer supported

Solution: remove monitoring code, completes #1138
vrde 2018-02-13 14:09:41 +01:00 committed by Sylvain Bellemare
parent c35e37be64
commit 4157244df7
12 changed files with 4 additions and 227 deletions

View File

@@ -123,9 +123,6 @@ config = {
'granular_levels': {},
'port': log_config['root']['port']
},
'graphite': {
'host': os.environ.get('BIGCHAINDB_GRAPHITE_HOST', 'localhost'),
},
}
# We need to maintain a backup copy of the original config dict in case

View File

@@ -1,5 +1,4 @@
import random
import statsd
from time import time
from bigchaindb import exceptions as core_exceptions
@@ -72,8 +71,6 @@ class Bigchain(object):
if not self.me or not self.me_private:
raise exceptions.KeypairNotFoundException()
self.statsd = statsd.StatsClient(bigchaindb.config['graphite']['host'])
federation = property(lambda self: set(self.nodes_except_me + [self.me]))
""" Set of federation member public keys """

View File

@@ -126,8 +126,6 @@ class BlockPipeline:
logger.info('Write new block %s with %s transactions',
block.id, len(block.transactions))
self.bigchain.write_block(block)
self.bigchain.statsd.incr('pipelines.block.throughput',
len(block.transactions))
return block
def delete_tx(self, block):

View File

@@ -149,7 +149,6 @@ class Vote:
logger.info("Voting '%s' for block %s", validity,
vote['vote']['voting_for_block'])
self.bigchain.write_vote(vote)
self.bigchain.statsd.incr('pipelines.vote.throughput', num_tx)
return vote

View File

@@ -82,7 +82,6 @@ class TransactionListApi(Resource):
)
with pool() as bigchain:
bigchain.statsd.incr('web.tx.post')
try:
bigchain.validate_transaction(tx_obj)
except ValidationError as e:

View File

@@ -22,16 +22,6 @@ services:
BIGCHAINDB_DATABASE_HOST: mdb
BIGCHAINDB_DATABASE_PORT: 27017
BIGCHAINDB_SERVER_BIND: 0.0.0.0:9984
BIGCHAINDB_GRAPHITE_HOST: graphite
ports:
- "9984"
command: bigchaindb start
graphite:
image: hopsoft/graphite-statsd
ports:
- "2003-2004"
- "2023-2024"
- "8125/udp"
- "8126"
- "80"

View File

@@ -48,7 +48,6 @@ For convenience, here's a list of all the relevant environment variables (docume
`BIGCHAINDB_DATABASE_KEYFILE`<br>
`BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE`<br>
`BIGCHAINDB_DATABASE_CRLFILE`<br>
`BIGCHAINDB_GRAPHITE_HOST`<br>
The local config file is `$HOME/.bigchaindb` by default (a file which might not even exist). You can tell BigchainDB to use a different file with the `-c` command-line option, e.g. `bigchaindb -c path/to/config_file.json start`,
or with the `BIGCHAINDB_CONFIG_PATH` environment variable, e.g. `BIGCHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start`; both invocations are shown side by side below.
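```bash
# Explicit config file via the -c option (path is the illustrative one
# from the example above, not a required name):
bigchaindb -c path/to/config_file.json start

# The same via the environment variable:
BIGCHAINDB_CONFIG_PATH=.my_bigchaindb_config bigchaindb start
```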
@@ -217,7 +216,7 @@ export BIGCHAINDB_SERVER_WORKERS=5
## wsserver.scheme, wsserver.host and wsserver.port
These settings are for the
[aiohttp server](https://aiohttp.readthedocs.io/en/stable/index.html),
which is used to serve the
[WebSocket Event Stream API](../websocket-event-stream-api.html).
`wsserver.scheme` should be either `"ws"` or `"wss"`
@@ -360,8 +359,8 @@ The full paths to the files where logs and error logs should be written to.
}
```
**Defaults to**:
* `"~/bigchaindb.log"`
* `"~/bigchaindb-errors.log"`
@@ -383,7 +382,7 @@ For example if we consider the log file setting:
```
logs would always be written to `bigchain.log`. Each time the file
`bigchain.log` reaches 200 MB it would be closed and renamed
`bigchain.log.1`. If `bigchain.log.1` and `bigchain.log.2` already exist they
would be renamed `bigchain.log.2` and `bigchain.log.3`. This pattern would be
applied up to `bigchain.log.5` after which `bigchain.log.5` would be
@@ -550,29 +549,3 @@ The port number at which the logging server should listen.
```
**Defaults to**: `9020`
## graphite.host
The host name or IP address of a server listening for statsd events on UDP
port 8125. This defaults to `localhost`, and if no statsd collector is running,
the events are simply dropped by the operating system.
**Example using environment variables**
```text
export BIGCHAINDB_GRAPHITE_HOST=10.0.0.5
```
**Example config file snippet**
```js
"graphite": {
"host": "10.0.0.5"
}
```
**Default values (from a config file)**
```js
"graphite": {
"host": "localhost"
}
```
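As background for the removal above: statsd events are fire-and-forget UDP datagrams, which is why a missing collector just means the packets are dropped rather than raising errors. A minimal sketch of what a counter increment such as the removed `pipelines.block.throughput` metric amounts to on the wire (the `incr` helper below is illustrative, not BigchainDB or `statsd` library code; it assumes the conventional `name:value|c` statsd format on port 8125):

```python
import socket

GRAPHITE_HOST = 'localhost'  # mirrors the removed graphite.host default


def incr(name, count=1):
    """Send one statsd counter increment as a best-effort UDP datagram."""
    payload = '{}:{}|c'.format(name, count).encode()
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.sendto(payload, (GRAPHITE_HOST, 8125))


incr('pipelines.block.throughput', 10)  # silently dropped if nothing listens
```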

View File

@@ -1,40 +0,0 @@
# Benchmarks
## CREATE transaction throughput
This is a measurement of the throughput of CREATE transactions through the entire
pipeline, i.e. the web frontend, block creation, and block validation. The
output of the measurement is transactions per second.
The benchmark runs for a fixed period of time and makes metrics available via
a Graphite interface.
### Running the benchmark
Dependencies:
* Python 3.5+
* docker-compose 1.8.0+
* docker 1.12+
To start:
$ python3 scripts/benchmarks/create_thoughtput.py
To start using a separate namespace for docker-compose:
$ COMPOSE_PROJECT_NAME=somename python3 scripts/benchmarks/create_thoughtput.py
### Results
A test was run on AWS with the following instance configuration:
* Ubuntu Server 16.04 (ami-060cde69)
* 32 core compute optimized (c3.8xlarge)
* 100 GB root volume (300/3000 IOPS)
The server received and validated over 800 transactions per second:
![BigchainDB transaction throughput](https://cloud.githubusercontent.com/assets/125019/26688641/85d56d1e-46f3-11e7-8148-bf3bc8c54c33.png)
For more information on how the benchmark was run, the abridged session buffer [is available](https://gist.github.com/libscott/8a37c5e134b2d55cfb55082b1cd85a02).

View File

@@ -1,133 +0,0 @@
import sys
import math
import time
import requests
import subprocess
import multiprocessing


def main():
    cmd('docker-compose -f docker-compose.yml -f benchmark.yml up -d mdb')
    cmd('docker-compose -f docker-compose.yml -f benchmark.yml up -d bdb')
    cmd('docker-compose -f docker-compose.yml -f benchmark.yml up -d graphite')

    out = cmd('docker-compose -f benchmark.yml port graphite 80', capture=True)
    graphite_web = 'http://localhost:%s/' % out.strip().split(':')[1]
    print('Graphite web interface at: ' + graphite_web)

    start = time.time()
    cmd('docker-compose -f docker-compose.yml -f benchmark.yml exec bdb python %s load' % sys.argv[0])
    mins = math.ceil((time.time() - start) / 60) + 1

    graph_url = graphite_web + 'render/?width=900&height=600&_salt=1495462891.335&target=stats.pipelines.block.throughput&target=stats.pipelines.vote.throughput&target=stats.web.tx.post&from=-%sminutes' % mins  # noqa
    print(graph_url)


def load():
    from bigchaindb.core import Bigchain
    from bigchaindb.common.crypto import generate_key_pair
    from bigchaindb.common.transaction import Transaction

    def transactions():
        priv, pub = generate_key_pair()
        tx = Transaction.create([pub], [([pub], 1)])
        while True:
            i = yield tx.to_dict()
            tx.asset = {'data': {'n': i}}
            tx.sign([priv])

    def wait_for_up():
        print('Waiting for server to start... ', end='')
        while True:
            try:
                requests.get('http://localhost:9984/')
                break
            except requests.ConnectionError:
                time.sleep(0.1)
        print('Ok')

    def post_txs():
        txs = transactions()
        txs.send(None)
        try:
            with requests.Session() as session:
                while True:
                    i = tx_queue.get()
                    if i is None:
                        break
                    tx = txs.send(i)
                    res = session.post('http://localhost:9984/api/v1/transactions/', json=tx)
                    assert res.status_code == 202
        except KeyboardInterrupt:
            pass

    wait_for_up()

    num_clients = 30
    test_time = 60
    tx_queue = multiprocessing.Queue(maxsize=num_clients)
    txn = 0
    b = Bigchain()
    start_time = time.time()

    for i in range(num_clients):
        multiprocessing.Process(target=post_txs).start()

    print('Sending transactions')
    while time.time() - start_time < test_time:
        # Post 500 transactions to the server
        for i in range(500):
            tx_queue.put(txn)
            txn += 1
        print(txn)

        while True:
            # Wait for the server to reduce the backlog to below
            # 10000 transactions. The expectation is that 10000 transactions
            # will not be processed faster than a further 500 transactions can
            # be posted, but nonetheless will be processed within a few seconds.
            # This keeps the test from running on and keeps the transactions from
            # being considered stale.
            count = b.connection.db.backlog.count()
            if count > 10000:
                time.sleep(0.2)
            else:
                break

    for i in range(num_clients):
        tx_queue.put(None)

    print('Waiting to clear backlog')
    while True:
        bl = b.connection.db.backlog.count()
        if bl == 0:
            break
        print(bl)
        time.sleep(1)

    print('Waiting for all votes to come in')
    while True:
        blocks = b.connection.db.bigchain.count()
        votes = b.connection.db.votes.count()
        if blocks == votes + 1:
            break
        print('%s blocks, %s votes' % (blocks, votes))
        time.sleep(3)

    print('Finished')


def cmd(command, capture=False):
    stdout = subprocess.PIPE if capture else None
    args = ['bash', '-c', command]
    proc = subprocess.Popen(args, stdout=stdout)
    assert not proc.wait()
    return capture and proc.stdout.read().decode()


if sys.argv[1:] == ['load']:
    load()
else:
    main()

View File

@@ -82,7 +82,6 @@ install_requires = [
'pyyaml~=3.12',
'aiohttp~=2.3',
'python-rapidjson-schema==0.1.1',
'statsd==3.2.1',
'abci~=0.3.0',
]

View File

@@ -304,7 +304,6 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request, ssl_con
'granular_levels': {},
'port': 9020
},
'graphite': {'host': 'localhost'},
}

View File

@@ -24,7 +24,6 @@ def config(request, monkeypatch):
'keyring': [],
'CONFIGURED': True,
'backlog_reassign_delay': 30,
'graphite': {'host': 'localhost'},
}
monkeypatch.setattr('bigchaindb.config', config)