From 843d65d2335de9de5797ec5b27e5674a1432a5f8 Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Wed, 22 Feb 2017 13:26:23 +0100 Subject: [PATCH 01/54] bigchain.is_new_transaction method --- bigchaindb/core.py | 16 +++++++++++++++ tests/db/test_bigchain_api.py | 37 +++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/bigchaindb/core.py b/bigchaindb/core.py index a7ed93f0..a4a65482 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -178,6 +178,22 @@ class Bigchain(object): exceptions.TransactionNotInValidBlock, exceptions.AmountError): return False + def is_new_transaction(self, txid, exclude_block_id=None): + """ + Return True if the transaction does not exist in any + VALID or UNDECIDED block. Return False otherwise. + + Args: + txid (str): Transaction ID + exclude_block_id (str): Exclude block from search + """ + block_statuses = self.get_blocks_status_containing_tx(txid) + block_statuses.pop(exclude_block_id, None) + for status in block_statuses.values(): + if status != self.BLOCK_INVALID: + return False + return True + def get_block(self, block_id, include_status=False): """Get the block with the specified `block_id` (and optionally its status) diff --git a/tests/db/test_bigchain_api.py b/tests/db/test_bigchain_api.py index 31abe176..c5b61c88 100644 --- a/tests/db/test_bigchain_api.py +++ b/tests/db/test_bigchain_api.py @@ -1240,3 +1240,40 @@ def test_transaction_unicode(b): assert b.get_block(block.id) == block.to_dict() assert block.validate(b) == block assert beer_json in serialize(block.to_dict()) + + +@pytest.mark.bdb +def test_is_new_transaction(b, genesis_block): + from bigchaindb.models import Transaction + + def write_tx(n): + tx = Transaction.create([b.me], [([b.me], n)]) + tx = tx.sign([b.me_private]) + # Tx is new because it's not in any block + assert b.is_new_transaction(tx.id) + + block = b.create_block([tx]) + b.write_block(block) + return tx, block + + # test VALID case + tx, block = write_tx(1) + # Tx is now in undecided block + assert not b.is_new_transaction(tx.id) + assert b.is_new_transaction(tx.id, exclude_block_id=block.id) + # After voting valid, should not be new + vote = b.vote(block.id, genesis_block.id, True) + b.write_vote(vote) + assert not b.is_new_transaction(tx.id) + assert b.is_new_transaction(tx.id, exclude_block_id=block.id) + + # test INVALID case + tx, block = write_tx(2) + # Tx is now in undecided block + assert not b.is_new_transaction(tx.id) + assert b.is_new_transaction(tx.id, exclude_block_id=block.id) + vote = b.vote(block.id, genesis_block.id, False) + b.write_vote(vote) + # Tx is new because it's only found in an invalid block + assert b.is_new_transaction(tx.id) + assert b.is_new_transaction(tx.id, exclude_block_id=block.id) From fc2b684f32d8476aba34ccd2c9fdb93c5e15131f Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Wed, 22 Feb 2017 13:52:53 +0100 Subject: [PATCH 02/54] use is_new_transaction in pipelines --- bigchaindb/pipelines/block.py | 29 ++++++++++------------------- bigchaindb/pipelines/vote.py | 10 ++++++++-- tests/pipelines/test_vote.py | 14 ++++++++++++++ 3 files changed, 32 insertions(+), 21 deletions(-) diff --git a/bigchaindb/pipelines/block.py b/bigchaindb/pipelines/block.py index 5f372384..1f2e9017 100644 --- a/bigchaindb/pipelines/block.py +++ b/bigchaindb/pipelines/block.py @@ -67,28 +67,19 @@ class BlockPipeline: AmountError): return None - if self.bigchain.transaction_exists(tx.id): - # if the transaction already exists, we must check whether - # it's in a valid or undecided 
block - tx, status = self.bigchain.get_transaction(tx.id, - include_status=True) - if status == self.bigchain.TX_VALID \ - or status == self.bigchain.TX_UNDECIDED: - # if the tx is already in a valid or undecided block, - # then it no longer should be in the backlog, or added - # to a new block. We can delete and drop it. - self.bigchain.delete_transaction(tx.id) - return None - - tx_validated = self.bigchain.is_valid_transaction(tx) - if tx_validated: - return tx - else: - # if the transaction is not valid, remove it from the - # backlog + # If transaction is in any VALID or UNDECIDED block we + # should not include it again + if not self.bigchain.is_new_transaction(tx.id): self.bigchain.delete_transaction(tx.id) return None + # If transaction is not valid it should not be included + if not self.bigchain.is_valid_transaction(tx): + self.bigchain.delete_transaction(tx.id) + return None + + return tx + def create(self, tx, timeout=False): """Create a block. diff --git a/bigchaindb/pipelines/vote.py b/bigchaindb/pipelines/vote.py index 5f385f53..958e5b9c 100644 --- a/bigchaindb/pipelines/vote.py +++ b/bigchaindb/pipelines/vote.py @@ -90,7 +90,8 @@ class Vote: yield tx, block_id, num_tx def validate_tx(self, tx, block_id, num_tx): - """Validate a transaction. + """Validate a transaction. Transaction must also not be in any VALID + block. Args: tx (dict): the transaction to validate @@ -101,7 +102,12 @@ class Vote: Three values are returned, the validity of the transaction, ``block_id``, ``num_tx``. """ - return bool(self.bigchain.is_valid_transaction(tx)), block_id, num_tx + new = self.bigchain.is_new_transaction(tx.id, exclude_block_id=block_id) + if not new: + return False, block_id, num_tx + + valid = bool(self.bigchain.is_valid_transaction(tx)) + return valid, block_id, num_tx def vote(self, tx_validity, block_id, num_tx): """Collect the validity of transactions and cast a vote when ready. 
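Taken together, the two pipeline changes above mean a transaction is voted valid only if it passes ordinary validation and is not already included in some other VALID or UNDECIDED block; the block currently under vote is excluded from that check because it necessarily contains the transaction itself. A condensed sketch of the vote-side decision, assuming a Bigchain instance that exposes the is_new_transaction() and is_valid_transaction() methods from this patch series (illustrative, not the patched code verbatim):

    def validate_tx_sketch(bigchain, tx, block_id, num_tx):
        # A transaction that already sits in another VALID or UNDECIDED
        # block must not be voted valid again (double inclusion).
        if not bigchain.is_new_transaction(tx.id, exclude_block_id=block_id):
            return False, block_id, num_tx
        # Otherwise fall back to ordinary per-transaction validation.
        return bool(bigchain.is_valid_transaction(tx)), block_id, num_tx

The test_vote_no_double_inclusion test added below exercises exactly this path.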
diff --git a/tests/pipelines/test_vote.py b/tests/pipelines/test_vote.py index e0b27f50..20beac1e 100644 --- a/tests/pipelines/test_vote.py +++ b/tests/pipelines/test_vote.py @@ -629,3 +629,17 @@ def test_start(mock_start, b): from bigchaindb.pipelines import vote vote.start() mock_start.assert_called_with() + + +@pytest.mark.genesis +def test_vote_no_double_inclusion(b): + from bigchaindb.pipelines import vote + + tx = dummy_tx(b) + block = b.create_block([tx]) + r = vote.Vote().validate_tx(tx, block.id, 1) + assert r == (True, block.id, 1) + + b.write_block(block) + r = vote.Vote().validate_tx(tx, 'other_block_id', 1) + assert r == (False, 'other_block_id', 1) From 6e7534d3c23f5bcf486b767dca1dede3e3ec3cfc Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Wed, 22 Feb 2017 13:55:26 +0100 Subject: [PATCH 03/54] cleanup; remove transaction_exists and has_transaction --- bigchaindb/backend/mongodb/query.py | 7 ------- bigchaindb/backend/query.py | 14 -------------- bigchaindb/backend/rethinkdb/query.py | 7 ------- bigchaindb/core.py | 3 --- tests/backend/mongodb/test_queries.py | 13 ------------- tests/backend/test_generics.py | 1 - tests/test_core.py | 9 --------- 7 files changed, 54 deletions(-) diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index 6a241a78..1988db04 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -212,13 +212,6 @@ def get_block(conn, block_id): projection={'_id': False})) -@register_query(MongoDBConnection) -def has_transaction(conn, transaction_id): - return bool(conn.run( - conn.collection('bigchain') - .find_one({'block.transactions.id': transaction_id}))) - - @register_query(MongoDBConnection) def count_blocks(conn): return conn.run( diff --git a/bigchaindb/backend/query.py b/bigchaindb/backend/query.py index bdfb9e28..9aa653d7 100644 --- a/bigchaindb/backend/query.py +++ b/bigchaindb/backend/query.py @@ -211,20 +211,6 @@ def get_block(connection, block_id): raise NotImplementedError -@singledispatch -def has_transaction(connection, transaction_id): - """Check if a transaction exists in the bigchain table. - - Args: - transaction_id (str): the id of the transaction to check. - - Returns: - ``True`` if the transaction exists, ``False`` otherwise. - """ - - raise NotImplementedError - - @singledispatch def count_blocks(connection): """Count the number of blocks in the bigchain table. 
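With has_transaction and transaction_exists on their way out in this patch, a caller that previously asked only whether a transaction exists in any block is expected to use the status-aware check introduced in the first patch instead. The two are close but not identical, as sketched here with an assumed Bigchain instance and transaction id:

    def tx_already_included(bigchain, txid):
        # Caveat: is_new_transaction() ignores blocks that were voted
        # INVALID, whereas the removed helpers matched any block at all.
        return not bigchain.is_new_transaction(txid)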
diff --git a/bigchaindb/backend/rethinkdb/query.py b/bigchaindb/backend/rethinkdb/query.py index 99346984..6011cc8c 100644 --- a/bigchaindb/backend/rethinkdb/query.py +++ b/bigchaindb/backend/rethinkdb/query.py @@ -158,13 +158,6 @@ def get_block(connection, block_id): return connection.run(r.table('bigchain').get(block_id)) -@register_query(RethinkDBConnection) -def has_transaction(connection, transaction_id): - return bool(connection.run( - r.table('bigchain', read_mode=READ_MODE) - .get_all(transaction_id, index='transaction_id').count())) - - @register_query(RethinkDBConnection) def count_blocks(connection): return connection.run( diff --git a/bigchaindb/core.py b/bigchaindb/core.py index a4a65482..b5465ef5 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -542,9 +542,6 @@ class Bigchain(object): return backend.query.write_block(self.connection, block) - def transaction_exists(self, transaction_id): - return backend.query.has_transaction(self.connection, transaction_id) - def prepare_genesis_block(self): """Prepare a genesis block.""" diff --git a/tests/backend/mongodb/test_queries.py b/tests/backend/mongodb/test_queries.py index 48089805..80e3cc91 100644 --- a/tests/backend/mongodb/test_queries.py +++ b/tests/backend/mongodb/test_queries.py @@ -248,19 +248,6 @@ def test_get_block(signed_create_tx): assert block_db == block.to_dict() -def test_has_transaction(signed_create_tx): - from bigchaindb.backend import connect, query - from bigchaindb.models import Block - conn = connect() - - # create and insert block - block = Block(transactions=[signed_create_tx]) - conn.db.bigchain.insert_one(block.to_dict()) - - assert query.has_transaction(conn, signed_create_tx.id) - assert query.has_transaction(conn, 'aaa') is False - - def test_count_blocks(signed_create_tx): from bigchaindb.backend import connect, query from bigchaindb.models import Block diff --git a/tests/backend/test_generics.py b/tests/backend/test_generics.py index 2f86d417..57a644ee 100644 --- a/tests/backend/test_generics.py +++ b/tests/backend/test_generics.py @@ -29,7 +29,6 @@ def test_schema(schema_func_name, args_qty): ('get_votes_by_block_id', 1), ('write_block', 1), ('get_block', 1), - ('has_transaction', 1), ('write_vote', 1), ('get_last_voted_block', 1), ('get_unvoted_blocks', 1), diff --git a/tests/test_core.py b/tests/test_core.py index 6bcabdc9..977db065 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -88,12 +88,3 @@ def test_has_previous_vote(monkeypatch): block = {'votes': ({'node_pubkey': 'pubkey'},)} with pytest.raises(Exception): bigchain.has_previous_vote(block) - - -@pytest.mark.parametrize('exists', (True, False)) -def test_transaction_exists(monkeypatch, exists): - from bigchaindb.core import Bigchain - monkeypatch.setattr( - 'bigchaindb.backend.query.has_transaction', lambda x, y: exists) - bigchain = Bigchain(public_key='pubkey', private_key='privkey') - assert bigchain.transaction_exists('txid') is exists From c11808ecc55c62022ab286f9044670972f824c87 Mon Sep 17 00:00:00 2001 From: vrde Date: Thu, 23 Feb 2017 17:20:21 +0100 Subject: [PATCH 04/54] Move common stuff to generic Connection class --- bigchaindb/__init__.py | 4 ++ bigchaindb/backend/connection.py | 54 ++++++++++++++- bigchaindb/backend/mongodb/connection.py | 79 ++++++---------------- bigchaindb/backend/rethinkdb/connection.py | 51 ++------------ tests/backend/mongodb/test_admin.py | 2 +- tests/test_config_utils.py | 5 ++ tests/test_core.py | 2 + 7 files changed, 90 insertions(+), 107 deletions(-) diff --git 
a/bigchaindb/__init__.py b/bigchaindb/__init__.py index 10e9e6ce..1df2551c 100644 --- a/bigchaindb/__init__.py +++ b/bigchaindb/__init__.py @@ -10,6 +10,8 @@ _database_rethinkdb = { 'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'), 'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 28015)), 'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'), + 'connection_timeout': 5000, + 'max_tries': 3, } _database_mongodb = { @@ -18,6 +20,8 @@ _database_mongodb = { 'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)), 'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'), 'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET', 'bigchain-rs'), + 'connection_timeout': 5000, + 'max_tries': 3, } _database_map = { diff --git a/bigchaindb/backend/connection.py b/bigchaindb/backend/connection.py index df21321d..d0913cf6 100644 --- a/bigchaindb/backend/connection.py +++ b/bigchaindb/backend/connection.py @@ -1,8 +1,10 @@ +from itertools import repeat from importlib import import_module import logging import bigchaindb from bigchaindb.common.exceptions import ConfigurationError +from bigchaindb.backend.exceptions import ConnectionError BACKENDS = { @@ -13,7 +15,8 @@ BACKENDS = { logger = logging.getLogger(__name__) -def connect(backend=None, host=None, port=None, name=None, replicaset=None): +def connect(backend=None, host=None, port=None, name=None, max_tries=None, + connection_timeout=None, replicaset=None): """Create a new connection to the database backend. All arguments default to the current configuration's values if not @@ -58,7 +61,7 @@ def connect(backend=None, host=None, port=None, name=None, replicaset=None): raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc logger.debug('Connection: {}'.format(Class)) - return Class(host, port, dbname, replicaset=replicaset) + return Class(host=host, port=port, dbname=dbname, replicaset=replicaset) class Connection: @@ -68,17 +71,41 @@ class Connection: from and implements this class. """ - def __init__(self, host=None, port=None, dbname=None, *args, **kwargs): + def __init__(self, host=None, port=None, dbname=None, + connection_timeout=None, max_tries=None, + **kwargs): """Create a new :class:`~.Connection` instance. Args: host (str): the host to connect to. port (int): the port to connect to. dbname (str): the name of the database to use. + connection_timeout (int, optional): the milliseconds to wait + until timing out the database connection attempt. + Defaults to 5000ms. + max_tries (int, optional): how many tries before giving up, + if 0 then try forever. Defaults to 3. **kwargs: arbitrary keyword arguments provided by the configuration's ``database`` settings """ + dbconf = bigchaindb.config['database'] + + self.host = host or dbconf['host'] + self.port = port or dbconf['port'] + self.dbname = dbname or dbconf['name'] + self.connection_timeout = connection_timeout if connection_timeout is not None\ + else dbconf['connection_timeout'] + self.max_tries = max_tries if max_tries is not None else dbconf['max_tries'] + self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0) + self._conn = None + + @property + def conn(self): + if self._conn is None: + self.connect() + return self._conn + def run(self, query): """Run a query. @@ -94,3 +121,24 @@ class Connection: """ raise NotImplementedError() + + def connect(self): + """Try to connect to the database. + + Raises: + :exc:`~ConnectionError`: If the connection to the database + fails. 
+ """ + + attempt = 0 + for i in self.max_tries_counter: + attempt += 1 + try: + self._conn = self._connect() + except ConnectionError as exc: + logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.', + attempt, self.max_tries if self.max_tries != 0 else '∞', + self.host, self.port, self.connection_timeout) + if attempt == self.max_tries: + logger.critical('Cannot connect to the Database. Giving up.') + raise ConnectionError() from exc diff --git a/bigchaindb/backend/mongodb/connection.py b/bigchaindb/backend/mongodb/connection.py index d01d5861..271d0e8e 100644 --- a/bigchaindb/backend/mongodb/connection.py +++ b/bigchaindb/backend/mongodb/connection.py @@ -1,6 +1,5 @@ import time import logging -from itertools import repeat import pymongo @@ -15,46 +14,20 @@ from bigchaindb.backend.connection import Connection logger = logging.getLogger(__name__) -# TODO: waiting for #1082 to be merged -# to move this constants in the configuration. - -CONNECTION_TIMEOUT = 4000 # in milliseconds -MAX_RETRIES = 3 # number of tries before giving up, if 0 then try forever - - class MongoDBConnection(Connection): - def __init__(self, host=None, port=None, dbname=None, - connection_timeout=None, max_tries=None, - replicaset=None): + def __init__(self, replicaset=None, **kwargs): """Create a new Connection instance. Args: - host (str, optional): the host to connect to. - port (int, optional): the port to connect to. - dbname (str, optional): the database to use. - connection_timeout (int, optional): the milliseconds to wait - until timing out the database connection attempt. - max_tries (int, optional): how many tries before giving up, - if 0 then try forever. replicaset (str, optional): the name of the replica set to connect to. + **kwargs: arbitrary keyword arguments provided by the + configuration's ``database`` settings """ - self.host = host or bigchaindb.config['database']['host'] - self.port = port or bigchaindb.config['database']['port'] + super().__init__(**kwargs) self.replicaset = replicaset or bigchaindb.config['database']['replicaset'] - self.dbname = dbname or bigchaindb.config['database']['name'] - self.connection_timeout = connection_timeout if connection_timeout is not None else CONNECTION_TIMEOUT - self.max_tries = max_tries if max_tries is not None else MAX_RETRIES - self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0) - self.connection = None - - @property - def conn(self): - if self.connection is None: - self._connect() - return self.connection @property def db(self): @@ -94,34 +67,23 @@ class MongoDBConnection(Connection): fails. """ - attempt = 0 - for i in self.max_tries_counter: - attempt += 1 + try: + # we should only return a connection if the replica set is + # initialized. initialize_replica_set will check if the + # replica set is initialized else it will initialize it. + initialize_replica_set(self.host, self.port, self.connection_timeout) - try: - # we should only return a connection if the replica set is - # initialized. initialize_replica_set will check if the - # replica set is initialized else it will initialize it. - initialize_replica_set(self.host, self.port, self.connection_timeout) + # FYI: this might raise a `ServerSelectionTimeoutError`, + # that is a subclass of `ConnectionFailure`. + return pymongo.MongoClient(self.host, + self.port, + replicaset=self.replicaset, + serverselectiontimeoutms=self.connection_timeout) - # FYI: this might raise a `ServerSelectionTimeoutError`, - # that is a subclass of `ConnectionFailure`. 
- self.connection = pymongo.MongoClient(self.host, - self.port, - replicaset=self.replicaset, - serverselectiontimeoutms=self.connection_timeout) - - # `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`. - except (pymongo.errors.ConnectionFailure, - pymongo.errors.OperationFailure) as exc: - logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.', - attempt, self.max_tries if self.max_tries != 0 else '∞', - self.host, self.port, self.connection_timeout) - if attempt == self.max_tries: - logger.critical('Cannot connect to the Database. Giving up.') - raise ConnectionError() from exc - else: - break + # `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`. + except (pymongo.errors.ConnectionFailure, + pymongo.errors.OperationFailure) as exc: + raise ConnectionError() from exc def initialize_replica_set(host, port, connection_timeout): @@ -166,9 +128,10 @@ def _check_replica_set(conn): replSet option. """ options = conn.admin.command('getCmdLineOpts') + print(options) try: repl_opts = options['parsed']['replication'] - repl_set_name = repl_opts.get('replSetName', None) or repl_opts['replSet'] + repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet')) except KeyError: raise ConfigurationError('mongod was not started with' ' the replSet option.') diff --git a/bigchaindb/backend/rethinkdb/connection.py b/bigchaindb/backend/rethinkdb/connection.py index 988573f6..e4d2c524 100644 --- a/bigchaindb/backend/rethinkdb/connection.py +++ b/bigchaindb/backend/rethinkdb/connection.py @@ -1,11 +1,7 @@ -import time -import logging - import rethinkdb as r from bigchaindb.backend.connection import Connection - -logger = logging.getLogger(__name__) +from bigchaindb.backend.exceptions import ConnectionError class RethinkDBConnection(Connection): @@ -17,23 +13,6 @@ class RethinkDBConnection(Connection): more times to run the query or open a connection. """ - def __init__(self, host, port, dbname, max_tries=3, **kwargs): - """Create a new :class:`~.RethinkDBConnection` instance. - - See :meth:`.Connection.__init__` for - :attr:`host`, :attr:`port`, and :attr:`dbname`. - - Args: - max_tries (int, optional): how many tries before giving up. - Defaults to 3. - """ - - self.host = host - self.port = port - self.dbname = dbname - self.max_tries = max_tries - self.conn = None - def run(self, query): """Run a RethinkDB query. @@ -45,16 +24,7 @@ class RethinkDBConnection(Connection): :attr:`~.RethinkDBConnection.max_tries`. """ - if self.conn is None: - self._connect() - - for i in range(self.max_tries): - try: - return query.run(self.conn) - except r.ReqlDriverError: - if i + 1 == self.max_tries: - raise - self._connect() + return query.run(self.conn) def _connect(self): """Set a connection to RethinkDB. @@ -66,16 +36,7 @@ class RethinkDBConnection(Connection): :attr:`~.RethinkDBConnection.max_tries`. """ - for i in range(1, self.max_tries + 1): - logging.debug('Connecting to database %s:%s/%s. 
(Attempt %s/%s)', - self.host, self.port, self.dbname, i, self.max_tries) - try: - self.conn = r.connect(host=self.host, port=self.port, db=self.dbname) - except r.ReqlDriverError: - if i == self.max_tries: - raise - wait_time = 2**i - logging.debug('Error connecting to database, waiting %ss', wait_time) - time.sleep(wait_time) - else: - break + try: + return r.connect(host=self.host, port=self.port, db=self.dbname) + except r.ReqlDriverError as exc: + raise ConnectionError() from exc diff --git a/tests/backend/mongodb/test_admin.py b/tests/backend/mongodb/test_admin.py index 148c853a..075ea2f9 100644 --- a/tests/backend/mongodb/test_admin.py +++ b/tests/backend/mongodb/test_admin.py @@ -40,7 +40,7 @@ def connection(): # executed to make sure that the replica set is correctly initialized. # Here we force the the connection setup so that all required # `Database.command` are executed before we mock them it in the tests. - connection._connect() + connection.connect() return connection diff --git a/tests/test_config_utils.py b/tests/test_config_utils.py index d69b789a..727c3ba3 100644 --- a/tests/test_config_utils.py +++ b/tests/test_config_utils.py @@ -139,12 +139,17 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request): 'host': DATABASE_HOST, 'port': DATABASE_PORT, 'name': DATABASE_NAME, + 'connection_timeout': 5000, + 'max_tries': 3 } + database_mongodb = { 'backend': 'mongodb', 'host': DATABASE_HOST, 'port': DATABASE_PORT, 'name': DATABASE_NAME, + 'connection_timeout': 5000, + 'max_tries': 3, 'replicaset': 'bigchain-rs', } diff --git a/tests/test_core.py b/tests/test_core.py index 6bcabdc9..f6f56ed1 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -10,6 +10,8 @@ def config(request, monkeypatch): 'port': 28015, 'name': 'bigchain', 'replicaset': 'bigchain-rs', + 'connection_timeout': 5000, + 'max_tries': 3 }, 'keypair': { 'public': 'pubkey', From 213139d4c6d9d64d1e6fd2ad68790ee68559c5d5 Mon Sep 17 00:00:00 2001 From: vrde Date: Mon, 27 Feb 2017 02:36:51 +0100 Subject: [PATCH 05/54] Improve tests and connection class --- bigchaindb/backend/connection.py | 6 +++- bigchaindb/backend/rethinkdb/changefeed.py | 3 +- bigchaindb/backend/rethinkdb/connection.py | 9 +++-- tests/backend/rethinkdb/test_connection.py | 41 +++++++++++++++------- 4 files changed, 41 insertions(+), 18 deletions(-) diff --git a/bigchaindb/backend/connection.py b/bigchaindb/backend/connection.py index d0913cf6..c1f0a629 100644 --- a/bigchaindb/backend/connection.py +++ b/bigchaindb/backend/connection.py @@ -61,7 +61,9 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None, raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc logger.debug('Connection: {}'.format(Class)) - return Class(host=host, port=port, dbname=dbname, replicaset=replicaset) + return Class(host=host, port=port, dbname=dbname, + max_tries=max_tries, connection_timeout=connection_timeout, + replicaset=replicaset) class Connection: @@ -142,3 +144,5 @@ class Connection: if attempt == self.max_tries: logger.critical('Cannot connect to the Database. 
Giving up.') raise ConnectionError() from exc + else: + break diff --git a/bigchaindb/backend/rethinkdb/changefeed.py b/bigchaindb/backend/rethinkdb/changefeed.py index e762d905..fb2dec1f 100644 --- a/bigchaindb/backend/rethinkdb/changefeed.py +++ b/bigchaindb/backend/rethinkdb/changefeed.py @@ -3,6 +3,7 @@ import logging import rethinkdb as r from bigchaindb import backend +from bigchaindb.backend.exceptions import BackendError from bigchaindb.backend.changefeed import ChangeFeed from bigchaindb.backend.utils import module_dispatch_registrar from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection @@ -23,7 +24,7 @@ class RethinkDBChangeFeed(ChangeFeed): try: self.run_changefeed() break - except (r.ReqlDriverError, r.ReqlOpFailedError) as exc: + except BackendError as exc: logger.exception(exc) time.sleep(1) diff --git a/bigchaindb/backend/rethinkdb/connection.py b/bigchaindb/backend/rethinkdb/connection.py index e4d2c524..e917e326 100644 --- a/bigchaindb/backend/rethinkdb/connection.py +++ b/bigchaindb/backend/rethinkdb/connection.py @@ -1,7 +1,7 @@ import rethinkdb as r from bigchaindb.backend.connection import Connection -from bigchaindb.backend.exceptions import ConnectionError +from bigchaindb.backend.exceptions import ConnectionError, OperationError class RethinkDBConnection(Connection): @@ -24,7 +24,10 @@ class RethinkDBConnection(Connection): :attr:`~.RethinkDBConnection.max_tries`. """ - return query.run(self.conn) + try: + return query.run(self.conn) + except r.ReqlDriverError as exc: + raise OperationError from exc def _connect(self): """Set a connection to RethinkDB. @@ -39,4 +42,4 @@ class RethinkDBConnection(Connection): try: return r.connect(host=self.host, port=self.port, db=self.dbname) except r.ReqlDriverError as exc: - raise ConnectionError() from exc + raise ConnectionError from exc diff --git a/tests/backend/rethinkdb/test_connection.py b/tests/backend/rethinkdb/test_connection.py index 073fecee..318ff850 100644 --- a/tests/backend/rethinkdb/test_connection.py +++ b/tests/backend/rethinkdb/test_connection.py @@ -1,6 +1,7 @@ import time import multiprocessing as mp from threading import Thread +from unittest import mock from unittest.mock import patch import pytest @@ -34,6 +35,7 @@ def test_run_a_simple_query(): def test_raise_exception_when_max_tries(): from bigchaindb.backend import connect + from bigchaindb.backend.exceptions import OperationError class MockQuery: def run(self, conn): @@ -41,28 +43,41 @@ def test_raise_exception_when_max_tries(): conn = connect() - with pytest.raises(r.ReqlDriverError): + with pytest.raises(OperationError): conn.run(MockQuery()) def test_reconnect_when_connection_lost(): from bigchaindb.backend import connect - def raise_exception(*args, **kwargs): - raise r.ReqlDriverError('mock') - - conn = connect() original_connect = r.connect - r.connect = raise_exception - def delayed_start(): - time.sleep(1) - r.connect = original_connect + with patch('rethinkdb.connect') as mock_connect: + mock_connect.side_effect = [ + r.ReqlDriverError('mock'), + original_connect() + ] - thread = Thread(target=delayed_start) - query = r.expr('1') - thread.start() - assert conn.run(query) == '1' + conn = connect() + query = r.expr('1') + assert conn.run(query) == '1' + + +def test_reconnect_when_connection_lost_tries_n_times(): + from bigchaindb.backend import connect + from bigchaindb.backend.exceptions import ConnectionError + + with patch('rethinkdb.connect') as mock_connect: + mock_connect.side_effect = [ + r.ReqlDriverError('mock'), + 
r.ReqlDriverError('mock'), + r.ReqlDriverError('mock') + ] + + conn = connect(max_tries=3) + query = r.expr('1') + with pytest.raises(ConnectionError): + assert conn.run(query) == '1' def test_changefeed_reconnects_when_connection_lost(monkeypatch): From f9748042c2fb94f006444ad1106b3d38397fc218 Mon Sep 17 00:00:00 2001 From: vrde Date: Mon, 27 Feb 2017 11:14:11 +0100 Subject: [PATCH 06/54] Fix exception handling in changefeed --- bigchaindb/backend/rethinkdb/changefeed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bigchaindb/backend/rethinkdb/changefeed.py b/bigchaindb/backend/rethinkdb/changefeed.py index fb2dec1f..390ada9a 100644 --- a/bigchaindb/backend/rethinkdb/changefeed.py +++ b/bigchaindb/backend/rethinkdb/changefeed.py @@ -24,8 +24,8 @@ class RethinkDBChangeFeed(ChangeFeed): try: self.run_changefeed() break - except BackendError as exc: - logger.exception(exc) + except (BackendError, r.ReqlDriverError) as exc: + logger.exception('Error connecting to the database, retrying') time.sleep(1) def run_changefeed(self): From f21811323f9d2e7d0038cdf9c4a13e878200f864 Mon Sep 17 00:00:00 2001 From: vrde Date: Mon, 27 Feb 2017 11:51:06 +0100 Subject: [PATCH 07/54] Remove unused import --- tests/backend/rethinkdb/test_connection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/backend/rethinkdb/test_connection.py b/tests/backend/rethinkdb/test_connection.py index 318ff850..880862af 100644 --- a/tests/backend/rethinkdb/test_connection.py +++ b/tests/backend/rethinkdb/test_connection.py @@ -1,7 +1,6 @@ import time import multiprocessing as mp from threading import Thread -from unittest import mock from unittest.mock import patch import pytest From 3523036617e383995d217d0094c996c6b8f803ae Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Mon, 27 Feb 2017 15:23:53 +0100 Subject: [PATCH 08/54] don't load consensus in vote pipeline (unneeded) --- bigchaindb/pipelines/vote.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/bigchaindb/pipelines/vote.py b/bigchaindb/pipelines/vote.py index a857ba78..da28cb30 100644 --- a/bigchaindb/pipelines/vote.py +++ b/bigchaindb/pipelines/vote.py @@ -13,9 +13,7 @@ from multipipes import Pipeline, Node import bigchaindb from bigchaindb import Bigchain from bigchaindb import backend -from bigchaindb import config_utils from bigchaindb.backend.changefeed import ChangeFeed -from bigchaindb.consensus import BaseConsensusRules from bigchaindb.models import Transaction, Block from bigchaindb.common import exceptions @@ -37,13 +35,6 @@ class Vote: # we need to create a temporary instance of BigchainDB that we use # only to query RethinkDB - consensusPlugin = bigchaindb.config.get('consensus_plugin') - - if consensusPlugin: - self.consensus = config_utils.load_consensus_plugin(consensusPlugin) - else: - self.consensus = BaseConsensusRules - # This is the Bigchain instance that will be "shared" (aka: copied) # by all the subprocesses From ffb1d7a624f2711c4aba3efb409fb128c3ed9d5d Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Mon, 27 Feb 2017 15:29:49 +0100 Subject: [PATCH 09/54] Docs on using special SSH keypair with ACS/Kubernetes --- docs/server/source/appendices/aws-setup.md | 17 +------ .../appendices/generate-key-pair-for-ssh.md | 34 +++++++++++++ docs/server/source/appendices/index.rst | 1 + .../template-kubernetes-azure.rst | 48 +++++++++++++++++-- 4 files changed, 81 insertions(+), 19 deletions(-) create mode 100644 docs/server/source/appendices/generate-key-pair-for-ssh.md diff --git 
a/docs/server/source/appendices/aws-setup.md b/docs/server/source/appendices/aws-setup.md index 0471f8af..38ce2c1c 100644 --- a/docs/server/source/appendices/aws-setup.md +++ b/docs/server/source/appendices/aws-setup.md @@ -42,23 +42,10 @@ This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and p Eventually, you'll have one or more instances (virtual machines) running on AWS and you'll want to SSH to them. To do that, you need a public/private key pair. The public key will be sent to AWS, and you can tell AWS to put it in any instances you provision there. You'll keep the private key on your local workstation. -First you need to make up a key name. Some ideas: +See the [page about how to generate a key pair for SSH](generate-key-pair-for-ssh.html). -* `bcdb-troy-1` -* `bigchaindb-7` -* `bcdb-jupiter` -If you already have key pairs on AWS (Amazon EC2), you have to pick a name that's not already being used. -Below, replace every instance of `` with your actual key name. -To generate a public/private RSA key pair with that name: -```text -ssh-keygen -t rsa -C "" -f ~/.ssh/ -``` - -It will ask you for a passphrase. You can use whatever passphrase you like, but don't lose it. Two keys (files) will be created in `~/.ssh/`: - -1. `~/.ssh/.pub` is the public key -2. `~/.ssh/` is the private key +## Send the Public Key to AWS To send the public key to AWS, use the AWS Command-Line Interface: ```text diff --git a/docs/server/source/appendices/generate-key-pair-for-ssh.md b/docs/server/source/appendices/generate-key-pair-for-ssh.md new file mode 100644 index 00000000..18b19392 --- /dev/null +++ b/docs/server/source/appendices/generate-key-pair-for-ssh.md @@ -0,0 +1,34 @@ +# Generate a Key Pair for SSH + +This page describes how to use `ssh-keygen` +to generate a public/private RSA key pair +that can be used with SSH. +(Note: `ssh-keygen` is found on most Linux and Unix-like +operating systems; if you're using Windows, +then you'll have to use another tool, +such as PuTTYgen.) + +By convention, SSH key pairs get stored in the `~/.ssh/` directory. +Check what keys you already have there: +```text +ls -1 ~/.ssh/ +``` + +Next, make up a new key pair name (called `` below). +Here are some ideas: + +* `aws-bdb-2` +* `tim-bdb-azure` +* `chris-bcdb-key` + +Next, generate a public/private RSA key pair with that name: +```text +ssh-keygen -t rsa -C "" -f ~/.ssh/ +``` + +It will ask you for a passphrase. +You can use whatever passphrase you like, but don't lose it. +Two keys (files) will be created in `~/.ssh/`: + +1. `~/.ssh/.pub` is the public key +2. `~/.ssh/` is the private key diff --git a/docs/server/source/appendices/index.rst b/docs/server/source/appendices/index.rst index 41b742b9..365bedfa 100755 --- a/docs/server/source/appendices/index.rst +++ b/docs/server/source/appendices/index.rst @@ -17,6 +17,7 @@ Appendices pipelines backend aws-setup + generate-key-pair-for-ssh firewall-notes ntp-notes example-rethinkdb-storage-setups diff --git a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst index ad4a8b04..a5d6f086 100644 --- a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst +++ b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst @@ -18,7 +18,20 @@ You may find that you have to sign up for a Free Trial subscription first. That's okay: you can have many subscriptions. 
-Step 2: Deploy an Azure Container Service (ACS) +Step 2: Create an SSH Key Pair +------------------------------ + +You'll want an SSH key pair so you'll be able to SSH +to the virtual machines that you'll deploy in the next step. +(If you already have an SSH key pair, you *could* reuse it, +but it's probably a good idea to make a new SSH key pair +for your Kubernetes VMs and nothing else.) + +See the +:ref:`page about how to generate a key pair for SSH `. + + +Step 3: Deploy an Azure Container Service (ACS) ----------------------------------------------- It's *possible* to deploy an Azure Container Service (ACS) @@ -82,8 +95,7 @@ Finally, you can deploy an ACS using something like: --agent-count 3 \ --agent-vm-size Standard_D2_v2 \ --dns-prefix \ - --generate-ssh-keys \ - --location \ + --ssh-key-value ~/.ssh/.pub \ --orchestrator-type kubernetes There are more options. For help understanding all the options, use the built-in help: @@ -100,4 +112,32 @@ and click on the one you created to see all the resources in it. Next, you can :doc:`run a BigchainDB node on your new -Kubernetes cluster `. \ No newline at end of file +Kubernetes cluster `. + + +Optional: SSH to Your New Kubernetes Cluster Nodes +-------------------------------------------------- + +You can SSH to one of the just-deployed Kubernetes "master" nodes +(virtual machines) using: + +.. code:: bash + + $ ssh -i ~/.ssh/.pub azureuser@ + +where you can get the IP address or hostname +of a master node from the Azure Portal. +Note how the default username is ``azureuser``. + +The "agent" nodes don't get public IP addresses or hostnames, +so you can't SSH to them *directly*, +but you can first SSH to the master +and then SSH to an agent from there +(using the *private* IP address of the agent node). +To do that, you either need to copy your SSH key pair to +the master (a bad idea), +or use something like +`SSH agent forwarding `_ (better). + +Next, you can :doc:`run a BigchainDB node on your new +Kubernetes cluster `. From b3e697f05be3b08d69521d8d562f54e2ebdd7630 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 28 Feb 2017 11:25:57 +0100 Subject: [PATCH 10/54] docs: can SSH to k8s agent via master using private hostname --- .../cloud-deployment-templates/template-kubernetes-azure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst index a5d6f086..62f765d0 100644 --- a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst +++ b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst @@ -133,7 +133,7 @@ The "agent" nodes don't get public IP addresses or hostnames, so you can't SSH to them *directly*, but you can first SSH to the master and then SSH to an agent from there -(using the *private* IP address of the agent node). +(using the *private* IP address or hostname of the agent node). To do that, you either need to copy your SSH key pair to the master (a bad idea), or use something like From 6821170fcd102e65f84531b073c17feb7b4b2e48 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 28 Feb 2017 13:58:44 +0100 Subject: [PATCH 11/54] docs: rm paren. 
sentence re/ deploying an ACS --- .../cloud-deployment-templates/template-kubernetes-azure.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst index 62f765d0..0758912d 100644 --- a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst +++ b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst @@ -39,10 +39,6 @@ from the `Azure Portal `_ (i.e. online in your web browser) but it's actually easier to do it using the Azure Command-Line Interface (CLI). -(The Azure Portal will ask you for a public SSH key -and a "service principal," and you'll have to create those -first if they don't exist. The CLI will create them -for you if necessary.) Microsoft has `instructions to install the Azure CLI 2.0 on most common operating systems From f2f1ee8ef109131c92078016bd3d321a19d13371 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 28 Feb 2017 19:08:13 +0100 Subject: [PATCH 12/54] docs: create k8s StorageClass & PVC --- .../node-on-kubernetes.rst | 116 ++++++++++++++++-- 1 file changed, 109 insertions(+), 7 deletions(-) diff --git a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst index 03ffb2fe..9ae16675 100644 --- a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst +++ b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst @@ -21,7 +21,7 @@ Step 2: Configure kubectl The default location of the kubectl configuration file is ``~/.kube/config``. If you don't have that file, then you need to get it. -If you deployed your Kubernetes cluster on Azure +**Azure.** If you deployed your Kubernetes cluster on Azure using the Azure CLI 2.0 (as per :doc:`our template `), then you can get the ``~/.kube/config`` file using: @@ -32,15 +32,117 @@ then you can get the ``~/.kube/config`` file using: --name -Step 3: Run a MongoDB Container -------------------------------- +Step 3: Create a StorageClass +----------------------------- -To start a MongoDB Docker container in a pod on one of the cluster nodes: +MongoDB needs somewhere to store its data persistently, +outside the container where MongoDB is running. +Explaining how Kubernetes handles persistent volumes, +and the associated terminology, +is beyond the scope of this documentation; +see `the Kubernetes docs about persistent volumes +`_. + +The first thing to do is create a Kubernetes StorageClass. + +**Azure.** First, you need an Azure storage account. +While you might be able to use an existing one, +create a new one specifically for MongoDB data: .. code:: bash - $ kubectl ????? + $ az storage account create --name \ + --resource-group \ + --location \ + --sku Standard_LRS + +where LRS means locally-redundant storage. Other option-values (and other options) can be found in `the docs for az storage account create `_. + +Next, create a Kubernetes Storage Class named ``slow`` +by writing a file named ``azureStorageClass.yml`` containing: + +.. code:: yaml + + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: slow + provisioner: kubernetes.io/azure-disk + parameters: + skuName: Standard_LRS + location: + +and then: + +.. code:: bash + + $ kubectl apply -f azureStorageClass.yml + +You can check if it worked using ``kubectl get storageclasses``. 
+ +Note that there is no line of the form +``storageAccount: `` +under ``parameters:``. When we included one +and then created a PersistentVolumeClaim based on it, +the PersistentVolumeClaim would get stuck +in a "Pending" state. -Note: The BigchainDB Dashboard can be deployed -as a Docker container, like everything else. +Step 4: Create a PersistentVolumeClaim +-------------------------------------- + +Next, you'll create a PersistentVolumeClaim named ``mongoclaim``. +Create a file named ``mongoclaim.yml`` +with the following contents: + +.. code:: yaml + + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: mongoclaim + annotations: + volume.beta.kubernetes.io/storage-class: slow + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + +Note how there's no explicit depencency on the storage provider. +``ReadWriteOnce`` (RWO) means the volume can be mounted as +read-write by a single Kubernetes node. +(``ReadWriteOnce`` is the *only* access mode supported +by AzureDisk.) +``storage: 2Gi`` means the volume has a size of two +`gibibytes `_. +(You can change that if you like.) + +Create ``mongoclaim`` in your Kubernetes cluster: + +.. code:: bash + + $ kubectl apply -f mongoclaim.yml + +You can check its status using: + +.. code:: bash + + $ kubectl get pvc + +Initially, the status of ``mongoclaim`` might be "Pending" +but it should become "Bound" fairly quickly. + +.. code:: bash + + $ kubectl describe pvc + Name: mongoclaim + Namespace: default + StorageClass: slow + Status: Bound + Volume: pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21 + Labels: + Capacity: 2Gi + Access Modes: RWO + No events. From 35ee6e55398062f3fa365c9e3d9830c91e3c4ffe Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Wed, 1 Mar 2017 13:35:31 +0100 Subject: [PATCH 13/54] docs: add instructions to update az command --- .../template-kubernetes-azure.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst index 0758912d..0fe8c378 100644 --- a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst +++ b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst @@ -45,6 +45,12 @@ on most common operating systems `_. Do that. +First, update the Azure CLI to the latest version: + +.. code:: bash + + $ az component update + Next, login to your account using: .. code:: bash From 8be2e2055944e00b60b3914fc8ab558cae5f19d0 Mon Sep 17 00:00:00 2001 From: Trent McConaghy Date: Wed, 1 Mar 2017 15:29:09 +0100 Subject: [PATCH 14/54] Update CONTRIBUTING.md --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 03d02403..eedb866a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,6 +2,8 @@ There are many ways you can contribute to the BigchainDB project, some very easy and others more involved. We want to be friendly and welcoming to all potential contributors, so we ask that everyone involved abide by some simple guidelines outlined in our [Code of Conduct](./CODE_OF_CONDUCT.md). +Or, are you interested in contributing full-time? BigchainDB is hiring. See [here](https://github.com/bigchaindb/org/blob/master/engjob.md). + ## Easy Ways to Contribute The BigchainDB community has a Google Group and a Gitter chatroom. Our [Community page](https://www.bigchaindb.com/community) has more information about those. 
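Returning to the Kubernetes storage steps documented earlier in this series: the node-on-kubernetes page notes that the mongoclaim PersistentVolumeClaim may show "Pending" before it becomes "Bound". If you would rather script that wait than rerun kubectl get pvc by hand, a small sketch along the following lines should do, assuming kubectl is on the PATH and already pointed at the cluster (the helper name and polling interval are illustrative, not anything defined in the repository):

    import json
    import subprocess
    import time

    def wait_for_pvc_bound(name='mongoclaim', timeout=300, poll=5):
        # Poll the claim's status.phase until it reports "Bound" or we give up.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(
                ['kubectl', 'get', 'pvc', name, '-o', 'json'])
            if json.loads(out.decode())['status'].get('phase') == 'Bound':
                return True
            time.sleep(poll)
        return False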
From 77c6b138a82491b538945a8b0115c6cb41585804 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Wed, 1 Mar 2017 15:49:20 +0100 Subject: [PATCH 15/54] expanded docs re/ Azure storage accounts & ACS --- .../node-on-kubernetes.rst | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst index 9ae16675..f64b8207 100644 --- a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst +++ b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst @@ -46,19 +46,28 @@ see `the Kubernetes docs about persistent volumes The first thing to do is create a Kubernetes StorageClass. **Azure.** First, you need an Azure storage account. -While you might be able to use an existing one, -create a new one specifically for MongoDB data: +If you deployed your Kubernetes cluster on Azure +using the Azure CLI 2.0 +(as per :doc:`our template `), +then the `az acs create` command already created two +storage accounts in the same location and resource group +as your Kubernetes cluster. +Both should have the same "storage account SKU": ``Standard_LRS``. +Standard storage is lower-cost and lower-performance. +It uses hard disk drives (HDD). +LRS means locally-redundant storage: three replicas +in the same data center. -.. code:: bash +Premium storage is higher-cost and higher-performance. +It uses solid state drives (SSD). +At the time of writing, +when we created a storage account with SKU ``Premium_LRS`` +and tried to use that, +the PersistentVolumeClaim would get stuck in a "Pending" state. +For future reference, the command to create a storage account is +`az storage account create `_. - $ az storage account create --name \ - --resource-group \ - --location \ - --sku Standard_LRS - -where LRS means locally-redundant storage. Other option-values (and other options) can be found in `the docs for az storage account create `_. - -Next, create a Kubernetes Storage Class named ``slow`` +Create a Kubernetes Storage Class named ``slow`` by writing a file named ``azureStorageClass.yml`` containing: .. code:: yaml @@ -86,6 +95,8 @@ under ``parameters:``. When we included one and then created a PersistentVolumeClaim based on it, the PersistentVolumeClaim would get stuck in a "Pending" state. +Kubernetes just looks for a storageAccount +with the specified skuName and location. Step 4: Create a PersistentVolumeClaim @@ -110,7 +121,7 @@ with the following contents: requests: storage: 2Gi -Note how there's no explicit depencency on the storage provider. +Note how there's no explicit mention of Azure, AWS or whatever. ``ReadWriteOnce`` (RWO) means the volume can be mounted as read-write by a single Kubernetes node. 
(``ReadWriteOnce`` is the *only* access mode supported From 4e32a492b16e9ec838631fbb88315481d9c72049 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Wed, 1 Mar 2017 16:26:57 +0100 Subject: [PATCH 16/54] docs: changed PersistentVolumeClaim from 2Gi to 20Gi --- .../source/cloud-deployment-templates/node-on-kubernetes.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst index f64b8207..afb0b438 100644 --- a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst +++ b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst @@ -119,14 +119,14 @@ with the following contents: - ReadWriteOnce resources: requests: - storage: 2Gi + storage: 20Gi Note how there's no explicit mention of Azure, AWS or whatever. ``ReadWriteOnce`` (RWO) means the volume can be mounted as read-write by a single Kubernetes node. (``ReadWriteOnce`` is the *only* access mode supported by AzureDisk.) -``storage: 2Gi`` means the volume has a size of two +``storage: 20Gi`` means the volume has a size of 20 `gibibytes `_. (You can change that if you like.) From b20278430db488117927731162d34c28b7e2e319 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 2 Mar 2017 12:23:49 +0100 Subject: [PATCH 17/54] Pin down rapidjson --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7e8c3441..1ff6bdbb 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ install_requires = [ 'pymongo~=3.4', 'pysha3~=1.0.2', 'cryptoconditions>=0.5.0', - 'python-rapidjson>=0.0.8', + 'python-rapidjson==0.0.8', 'logstats>=0.2.1', 'flask>=0.10.1', 'flask-restful~=0.3.0', From 6702ad192b3047a267e141f56384abb3a54bdd8f Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 2 Mar 2017 12:41:26 +0100 Subject: [PATCH 18/54] Update change log for 0.9.2 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f1f8d6b4..62dac89c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,14 @@ For reference, the possible headings are: * **External Contributors** to list contributors outside of BigchainDB GmbH. * **Notes** +## [0.9.2] - 2017-03-02 +Tag name: v0.9.2 + +### Fixed +Pin `python-rapidjson` library in `setup.py` to prevent `bigchaindb`'s +installation to fail due to +https://github.com/python-rapidjson/python-rapidjson/issues/62. 
+ ## [0.9.1] - 2017-02-06 Tag name: v0.9.1 From 64f4afb7ad14b5b0fd3243f04c59cce231eddb75 Mon Sep 17 00:00:00 2001 From: "krish7919 (Krish)" Date: Fri, 3 Mar 2017 10:19:09 +0100 Subject: [PATCH 19/54] Run bdb v0.9.1 as a pod in k8s --- k8s/node-ss.yaml | 89 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 k8s/node-ss.yaml diff --git a/k8s/node-ss.yaml b/k8s/node-ss.yaml new file mode 100644 index 00000000..9580daf6 --- /dev/null +++ b/k8s/node-ss.yaml @@ -0,0 +1,89 @@ +##################################################### +# This config file uses bdb v0.9.1 with bundled rdb # +##################################################### + +apiVersion: v1 +kind: Service +metadata: + name: bdb-service + namespace: default + labels: + name: bdb-service +spec: + selector: + app: bdb + ports: + - port: 9984 + targetPort: 9984 + name: bdb-http-api + - port: 8080 + targetPort: 8080 + name: bdb-rethinkdb-api + type: LoadBalancer +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: bdb + namespace: default +spec: + serviceName: bdb + replicas: 1 + template: + metadata: + name: bdb + labels: + app: bdb + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "bdb091-configure", + "image": "bigchaindb/bigchaindb:0.9.1", + "command": ["bigchaindb", "-y", "configure", "rethinkdb"], + "volumeMounts": [ + { + "name": "bigchaindb-data", + "mountPath": "/data" + } + ] + } + ]' + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: bdb091-server + image: bigchaindb/bigchaindb:0.9.1 + args: + - -c + - /data/.bigchaindb + - start + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9984 + hostPort: 9984 + name: bdb-port + protocol: TCP + volumeMounts: + - name: bigchaindb-data + mountPath: /data + resources: + limits: + cpu: 200m + memory: 768Mi + livenessProbe: + httpGet: + path: / + port: 9984 + initialDelaySeconds: 15 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: / + port: 9984 + initialDelaySeconds: 15 + timeoutSeconds: 10 + restartPolicy: Always + volumes: + - name: bigchaindb-data + hostPath: + path: /disk/bigchaindb-data From dc9f93dfbc2ee1427b58f6775854df8578db73df Mon Sep 17 00:00:00 2001 From: Krish Date: Fri, 3 Mar 2017 10:29:30 +0100 Subject: [PATCH 20/54] Enhancements to run with docker locally (#1239) --- .../server/source/appendices/run-with-docker.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/server/source/appendices/run-with-docker.md b/docs/server/source/appendices/run-with-docker.md index 6c1d2ce0..6700391e 100644 --- a/docs/server/source/appendices/run-with-docker.md +++ b/docs/server/source/appendices/run-with-docker.md @@ -75,6 +75,8 @@ docker run \ --name=rethinkdb \ --publish=172.17.0.1:28015:28015 \ --publish=172.17.0.1:58080:8080 \ + --restart=always \ + --volume "$HOME/bigchaindb_docker:/data" \ rethinkdb:2.3 ``` @@ -85,11 +87,25 @@ You can also access the RethinkDB dashboard at #### For MongoDB +Note: MongoDB runs as user `mongodb` which had the UID `999` and GID `999` +inside the container. For the volume to be mounted properly, as user `mongodb` +in your host, you should have a `mongodb` user with UID and GID `999`. +If you have another user on the host with UID `999`, the mapped files will +be owned by this user in the host. +If there is no owner with UID 999, you can create the corresponding user and +group. 
+ +`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb` + + ```text docker run \ --detach \ --name=mongodb \ --publish=172.17.0.1:27017:27017 \ + --restart=always \ + --volume=/tmp/mongodb_docker/db:/data/db \ + --volume=/tmp/mongodb_docker/configdb:/data/configdb \ mongo:3.4.1 --replSet=bigchain-rs ``` @@ -100,6 +116,7 @@ docker run \ --detach \ --name=bigchaindb \ --publish=59984:9984 \ + --restart=always \ --volume=$HOME/bigchaindb_docker:/data \ bigchaindb/bigchaindb \ start From 9228e693551f8ad514e99a0ef2c44fa9672f60c0 Mon Sep 17 00:00:00 2001 From: "krish7919 (Krish)" Date: Fri, 3 Mar 2017 12:07:49 +0100 Subject: [PATCH 21/54] Run bdb:latest with rdb:2.3 as separate containers in the same pod --- k8s/node-ss.yaml | 46 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/k8s/node-ss.yaml b/k8s/node-ss.yaml index 9580daf6..f660aebc 100644 --- a/k8s/node-ss.yaml +++ b/k8s/node-ss.yaml @@ -1,6 +1,6 @@ -##################################################### -# This config file uses bdb v0.9.1 with bundled rdb # -##################################################### +################################################################## +# This config file uses bdb:latest with a spearate rethinkdb:2.3 # +################################################################## apiVersion: v1 kind: Service @@ -37,8 +37,8 @@ spec: annotations: pod.beta.kubernetes.io/init-containers: '[ { - "name": "bdb091-configure", - "image": "bigchaindb/bigchaindb:0.9.1", + "name": "bdb-configure", + "image": "bigchaindb/bigchaindb:latest", "command": ["bigchaindb", "-y", "configure", "rethinkdb"], "volumeMounts": [ { @@ -52,7 +52,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: bdb091-server - image: bigchaindb/bigchaindb:0.9.1 + image: bigchaindb/bigchaindb:latest args: - -c - /data/.bigchaindb @@ -82,8 +82,42 @@ spec: port: 9984 initialDelaySeconds: 15 timeoutSeconds: 10 + - name: rethinkdb + image: rethinkdb:2.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + hostPort: 8080 + name: rdb-http-port + protocol: TCP + - containerPort: 28015 + hostPort: 28015 + name: rdb-client-port + protocol: TCP + volumeMounts: + - name: rdb-data + mountPath: /data + resources: + limits: + cpu: 200m + memory: 768Mi + livenessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 10 restartPolicy: Always volumes: - name: bigchaindb-data hostPath: path: /disk/bigchaindb-data + - name: rdb-data + hostPath: + path: /disk/rdb-data From bbe9d4fc88625e5c880525482b16fb51cd061c78 Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Fri, 3 Mar 2017 13:24:30 +0100 Subject: [PATCH 22/54] add some clarifications to test_voting.py --- tests/test_voting.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_voting.py b/tests/test_voting.py index 404f4c93..a14345a2 100644 --- a/tests/test_voting.py +++ b/tests/test_voting.py @@ -49,6 +49,9 @@ def test_count_votes(): 'counts': { 'n_valid': 9, # 9 kosher votes 'n_invalid': 4, # 1 cheat, 1 invalid, 1 malformed, 1 rogue prev block + # One of the cheat votes counts towards n_invalid, the other is + # not counted here. 
+ # len(cheat) + n_valid + n_invalid == len(votes) }, 'cheat': [votes[:2]], 'malformed': [votes[3]], @@ -83,18 +86,15 @@ def test_must_agree_prev_block(): # Tests for vote decision making -DECISION_TESTS = [dict( - zip(['n_voters', 'n_valid', 'n_invalid'], t)) - for t in [ - (1, 1, 1), - (2, 2, 1), - (3, 2, 2), - (4, 3, 2), - (5, 3, 3), - (6, 4, 3), - (7, 4, 4), - (8, 5, 4), - ] +DECISION_TESTS = [ + {'n_voters': 1, 'n_valid': 1, 'n_invalid': 1}, + {'n_voters': 2, 'n_valid': 2, 'n_invalid': 1}, + {'n_voters': 3, 'n_valid': 2, 'n_invalid': 2}, + {'n_voters': 4, 'n_valid': 3, 'n_invalid': 2}, + {'n_voters': 5, 'n_valid': 3, 'n_invalid': 3}, + {'n_voters': 6, 'n_valid': 4, 'n_invalid': 3}, + {'n_voters': 7, 'n_valid': 4, 'n_invalid': 4}, + {'n_voters': 8, 'n_valid': 5, 'n_invalid': 4} ] From 30db8a4e3027e25b8d77559e641d5fbcd6ee2763 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Fri, 3 Mar 2017 14:20:43 +0100 Subject: [PATCH 23/54] Renamed 'bdb091-server' to 'bdb-server' --- k8s/node-ss.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/node-ss.yaml b/k8s/node-ss.yaml index f660aebc..ecbf1fcd 100644 --- a/k8s/node-ss.yaml +++ b/k8s/node-ss.yaml @@ -51,7 +51,7 @@ spec: spec: terminationGracePeriodSeconds: 10 containers: - - name: bdb091-server + - name: bdb-server image: bigchaindb/bigchaindb:latest args: - -c From e1d7f95a8c874d3eb9c48cf0425acd3e428e60ea Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Fri, 3 Mar 2017 15:57:35 +0100 Subject: [PATCH 24/54] Switch to PVC for RDB in single BDB+RDB StatefulSet --- k8s/node-ss.yaml | 43 ++++++++++++++++++------------------------- 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/k8s/node-ss.yaml b/k8s/node-ss.yaml index ecbf1fcd..1ec05868 100644 --- a/k8s/node-ss.yaml +++ b/k8s/node-ss.yaml @@ -34,38 +34,34 @@ spec: name: bdb labels: app: bdb - annotations: - pod.beta.kubernetes.io/init-containers: '[ - { - "name": "bdb-configure", - "image": "bigchaindb/bigchaindb:latest", - "command": ["bigchaindb", "-y", "configure", "rethinkdb"], - "volumeMounts": [ - { - "name": "bigchaindb-data", - "mountPath": "/data" - } - ] - } - ]' spec: terminationGracePeriodSeconds: 10 containers: - name: bdb-server image: bigchaindb/bigchaindb:latest args: - - -c - - /data/.bigchaindb - start + env: + - name: BIGCHAINDB_KEYPAIR_PRIVATE + value: 56mEvwwVxcYsFQ3Y8UTFB8DVBv38yoUhxzDW3DAdLVd2 + - name: BIGCHAINDB_KEYPAIR_PUBLIC + value: 9DsHwiEtvk51UHmNM2eV66czFha69j3CdtNrCj1RcZWR + - name: BIGCHAINDB_KEYRING + value: "" + - name: BIGCHAINDB_DATABASE_BACKEND + value: rethinkdb + - name: BIGCHAINDB_DATABASE_PORT + value: "28015" + - name: BIGCHAINDB_DATABASE_HOST + value: localhost + - name: BIGCHAINDB_SERVER_BIND + value: "0.0.0.0:9984" imagePullPolicy: IfNotPresent ports: - containerPort: 9984 hostPort: 9984 name: bdb-port - protocol: TCP - volumeMounts: - - name: bigchaindb-data - mountPath: /data + protocol: TCP resources: limits: cpu: 200m @@ -115,9 +111,6 @@ spec: timeoutSeconds: 10 restartPolicy: Always volumes: - - name: bigchaindb-data - hostPath: - path: /disk/bigchaindb-data - name: rdb-data - hostPath: - path: /disk/rdb-data + persistentVolumeClaim: + claimName: mongoclaim From 43284798febd805de558b1baed6fcd90cb19ec30 Mon Sep 17 00:00:00 2001 From: "krish7919 (Krish)" Date: Fri, 3 Mar 2017 17:34:50 +0100 Subject: [PATCH 25/54] Run bdb:latest with mongodb:3.4.1 as separate containers in the same pod --- k8s/node-mdb-ss.yaml | 116 +++++++++++++++++++++++++ k8s/{node-ss.yaml => node-rdb-ss.yaml} | 0 2 files changed, 116 
insertions(+) create mode 100644 k8s/node-mdb-ss.yaml rename k8s/{node-ss.yaml => node-rdb-ss.yaml} (100%) diff --git a/k8s/node-mdb-ss.yaml b/k8s/node-mdb-ss.yaml new file mode 100644 index 00000000..c4a9f2f3 --- /dev/null +++ b/k8s/node-mdb-ss.yaml @@ -0,0 +1,116 @@ +################################################################## +# This config file uses bdb:latest with a spearate mongodb:3.4.1 # +################################################################## + +apiVersion: v1 +kind: Service +metadata: + name: bdb-service + namespace: default + labels: + name: bdb-service +spec: + selector: + app: bdb + ports: + - port: 9984 + targetPort: 9984 + name: bdb-http-api + - port: 27017 + targetPort: 27017 + name: bdb-mdb-port + type: LoadBalancer +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: bdb + namespace: default +spec: + serviceName: bdb + replicas: 1 + template: + metadata: + name: bdb + labels: + app: bdb + annotations: + pod.beta.kubernetes.io/init-containers: '[ + { + "name": "bdb-configure", + "image": "bigchaindb/bigchaindb:latest", + "command": ["bigchaindb", "-y", "configure", "mongodb"], + "volumeMounts": [ + { + "name": "bigchaindb-data", + "mountPath": "/data" + } + ] + } + ]' + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: bdb-server + image: bigchaindb/bigchaindb:latest + args: + - -c + - /data/.bigchaindb + - start + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9984 + hostPort: 9984 + name: bdb-port + protocol: TCP + volumeMounts: + - name: bigchaindb-data + mountPath: /data + resources: + limits: + cpu: 200m + memory: 768Mi + livenessProbe: + httpGet: + path: / + port: bdb-port + initialDelaySeconds: 15 + timeoutSeconds: 10 +# readinessProbe: +# httpGet: +# path: / +# port: bdb-port +# initialDelaySeconds: 15 +# timeoutSeconds: 10 + - name: mongodb + image: mongo:3.4.1 + args: + - --replSet=bigchain-rs + imagePullPolicy: IfNotPresent + ports: + - containerPort: 27017 + hostPort: 27017 + name: mdb-port + protocol: TCP + volumeMounts: + - name: mdb-data + mountPath: /data + resources: + limits: + cpu: 200m + memory: 768Mi + livenessProbe: + tcpSocket: + port: mdb-port + successThreshold: 1 + failureThreshold: 3 + periodSeconds: 15 + timeoutSeconds: 1 + restartPolicy: Always + volumes: + - name: bigchaindb-data + hostPath: + path: /disk/bigchaindb-data + - name: mdb-data + hostPath: + path: /disk/mdb-data diff --git a/k8s/node-ss.yaml b/k8s/node-rdb-ss.yaml similarity index 100% rename from k8s/node-ss.yaml rename to k8s/node-rdb-ss.yaml From 81dee294ea4c4b83b53241a44853f48fa7aeb6fa Mon Sep 17 00:00:00 2001 From: diminator Date: Fri, 17 Feb 2017 09:58:55 +0100 Subject: [PATCH 26/54] unitest for bug --- tests/web/test_outputs.py | 65 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/tests/web/test_outputs.py b/tests/web/test_outputs.py index fd17d46d..0a606991 100644 --- a/tests/web/test_outputs.py +++ b/tests/web/test_outputs.py @@ -47,3 +47,68 @@ def test_get_outputs_endpoint_with_invalid_unspent(client, user_pk): res = client.get(OUTPUTS_ENDPOINT + params) assert expected == res.json assert res.status_code == 400 + + +@pytest.mark.bdb +@pytest.mark.usefixtures('inputs') +def test_get_divisble_transactions_returns_500(b, client): + from bigchaindb.models import Transaction + from bigchaindb.common import crypto + import json + + TX_ENDPOINT = '/api/v1/transactions' + + def mine(tx_list): + block = b.create_block(tx_list) + b.write_block(block) + + # vote the block valid + vote = 
b.vote(block.id, b.get_last_voted_block().id, True) + b.write_vote(vote) + + alice_priv, alice_pub = crypto.generate_key_pair() + bob_priv, bob_pub = crypto.generate_key_pair() + carly_priv, carly_pub = crypto.generate_key_pair() + + create_tx = Transaction.create([alice_pub], [([alice_pub], 4)]) + create_tx.sign([alice_priv]) + + res = client.post(TX_ENDPOINT, data=json.dumps(create_tx.to_dict())) + assert res.status_code == 202 + + mine([create_tx]) + + transfer_tx = Transaction.transfer(create_tx.to_inputs(), + [([alice_pub], 3), ([bob_pub], 1)], + asset_id=create_tx.id) + transfer_tx.sign([alice_priv]) + + res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx.to_dict())) + assert res.status_code == 202 + + mine([transfer_tx]) + + transfer_tx_carly = Transaction.transfer([transfer_tx.to_inputs()[1]], + [([carly_pub], 1)], + asset_id=create_tx.id) + transfer_tx_carly.sign([bob_priv]) + + res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx_carly.to_dict())) + assert res.status_code == 202 + + mine([transfer_tx_carly]) + + asset_id = create_tx.id + + url = TX_ENDPOINT + "?asset_id=" + asset_id + assert client.get(url).status_code == 200 + assert len(client.get(url).json) == 3 + + url = OUTPUTS_ENDPOINT + '?public_key=' + alice_pub + assert client.get(url).status_code == 200 + + url = OUTPUTS_ENDPOINT + '?public_key=' + bob_pub + assert client.get(url).status_code == 200 + + url = OUTPUTS_ENDPOINT + '?public_key=' + carly_pub + assert client.get(url).status_code == 200 From 56243a57ab8f8112e9a75ccc9f1c94eb9b9abc66 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Fri, 3 Mar 2017 02:00:17 +0100 Subject: [PATCH 27/54] Fix indent --- tests/web/test_outputs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/web/test_outputs.py b/tests/web/test_outputs.py index 0a606991..b8f18d68 100644 --- a/tests/web/test_outputs.py +++ b/tests/web/test_outputs.py @@ -89,8 +89,8 @@ def test_get_divisble_transactions_returns_500(b, client): mine([transfer_tx]) transfer_tx_carly = Transaction.transfer([transfer_tx.to_inputs()[1]], - [([carly_pub], 1)], - asset_id=create_tx.id) + [([carly_pub], 1)], + asset_id=create_tx.id) transfer_tx_carly.sign([bob_priv]) res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx_carly.to_dict())) From f64401eed3b005c4420c852d4e9c9bb913c27229 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Fri, 3 Mar 2017 17:43:25 +0100 Subject: [PATCH 28/54] Only append tx if it meets the conditions fixes #1231 --- bigchaindb/core.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bigchaindb/core.py b/bigchaindb/core.py index 9f93d47a..5c72a12b 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -430,14 +430,13 @@ class Bigchain(object): # check if the owner is in the condition `owners_after` if len(output['public_keys']) == 1: if output['condition']['details']['public_key'] == owner: - tx_link = TransactionLink(tx['id'], index) + links.append(TransactionLink(tx['id'], index)) else: # for transactions with multiple `public_keys` there will be several subfulfillments nested # in the condition. 
We need to iterate the subfulfillments to make sure there is a # subfulfillment for `owner` if utils.condition_details_has_owner(output['condition']['details'], owner): - tx_link = TransactionLink(tx['id'], index) - links.append(tx_link) + links.append(TransactionLink(tx['id'], index)) return links def get_owned_ids(self, owner): From 93d06b4e2abceb25cb46eecfb9ba43c6120229ab Mon Sep 17 00:00:00 2001 From: "krish7919 (Krish)" Date: Sat, 4 Mar 2017 20:05:30 +0100 Subject: [PATCH 29/54] PVC for MDB in single BDB+MDB StatefulSet --- k8s/node-mdb-ss.yaml | 60 +++++++++++++++++++++----------------------- k8s/node-rdb-ss.yaml | 4 +-- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/k8s/node-mdb-ss.yaml b/k8s/node-mdb-ss.yaml index c4a9f2f3..10f22f0f 100644 --- a/k8s/node-mdb-ss.yaml +++ b/k8s/node-mdb-ss.yaml @@ -34,38 +34,43 @@ spec: name: bdb labels: app: bdb - annotations: - pod.beta.kubernetes.io/init-containers: '[ - { - "name": "bdb-configure", - "image": "bigchaindb/bigchaindb:latest", - "command": ["bigchaindb", "-y", "configure", "mongodb"], - "volumeMounts": [ - { - "name": "bigchaindb-data", - "mountPath": "/data" - } - ] - } - ]' + #annotations: + #pod.beta.kubernetes.io/init-containers: '[ + # TODO mongodb user and group; id = 999 spec: terminationGracePeriodSeconds: 10 containers: - name: bdb-server image: bigchaindb/bigchaindb:latest args: - - -c - - /data/.bigchaindb - start + env: + - name: BIGCHAINDB_KEYPAIR_PRIVATE + value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm + - name: BIGCHAINDB_KEYPAIR_PUBLIC + value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ + - name: BIGCHAINDB_KEYRING + value: "" + - name: BIGCHAINDB_DATABASE_BACKEND + value: mongodb + - name: BIGCHAINDB_DATABASE_HOST + value: localhost + - name: BIGCHAINDB_DATABASE_PORT + value: "27017" + - name: BIGCHAINDB_SERVER_BIND + value: "0.0.0.0:9984" + - name: BIGCHAINDB_DATABASE_REPLICASET + value: bigchain-rs + - name: BIGCHAINDB_DATABASE_NAME + value: bigchain + - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY + value: "120" imagePullPolicy: IfNotPresent ports: - containerPort: 9984 hostPort: 9984 name: bdb-port - protocol: TCP - volumeMounts: - - name: bigchaindb-data - mountPath: /data + protocol: TCP resources: limits: cpu: 200m @@ -76,12 +81,6 @@ spec: port: bdb-port initialDelaySeconds: 15 timeoutSeconds: 10 -# readinessProbe: -# httpGet: -# path: / -# port: bdb-port -# initialDelaySeconds: 15 -# timeoutSeconds: 10 - name: mongodb image: mongo:3.4.1 args: @@ -91,7 +90,7 @@ spec: - containerPort: 27017 hostPort: 27017 name: mdb-port - protocol: TCP + protocol: TCP volumeMounts: - name: mdb-data mountPath: /data @@ -108,9 +107,6 @@ spec: timeoutSeconds: 1 restartPolicy: Always volumes: - - name: bigchaindb-data - hostPath: - path: /disk/bigchaindb-data - name: mdb-data - hostPath: - path: /disk/mdb-data + persistentVolumeClaim: + claimName: mongoclaim diff --git a/k8s/node-rdb-ss.yaml b/k8s/node-rdb-ss.yaml index 1ec05868..4a995213 100644 --- a/k8s/node-rdb-ss.yaml +++ b/k8s/node-rdb-ss.yaml @@ -50,10 +50,10 @@ spec: value: "" - name: BIGCHAINDB_DATABASE_BACKEND value: rethinkdb - - name: BIGCHAINDB_DATABASE_PORT - value: "28015" - name: BIGCHAINDB_DATABASE_HOST value: localhost + - name: BIGCHAINDB_DATABASE_PORT + value: "28015" - name: BIGCHAINDB_SERVER_BIND value: "0.0.0.0:9984" imagePullPolicy: IfNotPresent From 880729cac2059556eb6a25208a571a4dcb900c21 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Sun, 5 Mar 2017 16:47:12 +0100 Subject: [PATCH 30/54] minor changes to 2 yml files + added RDB 
intracluster port --- k8s/node-mdb-ss.yaml | 10 ++++++---- k8s/node-rdb-ss.yaml | 25 ++++++++++++++++++++----- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/k8s/node-mdb-ss.yaml b/k8s/node-mdb-ss.yaml index 10f22f0f..304750c2 100644 --- a/k8s/node-mdb-ss.yaml +++ b/k8s/node-mdb-ss.yaml @@ -1,6 +1,8 @@ -################################################################## -# This config file uses bdb:latest with a spearate mongodb:3.4.1 # -################################################################## +################################################################# +# This YAML file desribes a StatefulSet with two containers: # +# bigchaindb/bigchaindb:latest and mongo:3.4.1 # +# It also describes a Service to expose BigchainDB and MongoDB. # +################################################################# apiVersion: v1 kind: Service @@ -18,7 +20,7 @@ spec: name: bdb-http-api - port: 27017 targetPort: 27017 - name: bdb-mdb-port + name: mongodb-port type: LoadBalancer --- apiVersion: apps/v1beta1 diff --git a/k8s/node-rdb-ss.yaml b/k8s/node-rdb-ss.yaml index 4a995213..fc157746 100644 --- a/k8s/node-rdb-ss.yaml +++ b/k8s/node-rdb-ss.yaml @@ -1,6 +1,10 @@ -################################################################## -# This config file uses bdb:latest with a spearate rethinkdb:2.3 # -################################################################## +############################################################## +# This YAML file desribes a StatefulSet with two containers: # +# bigchaindb/bigchaindb:latest and rethinkdb:2.3 # +# It also describes a Service to expose BigchainDB, # +# the RethinkDB intracluster communications port, and # +# the RethinkDB web interface port. # +############################################################## apiVersion: v1 kind: Service @@ -16,9 +20,12 @@ spec: - port: 9984 targetPort: 9984 name: bdb-http-api + - port: 29015 + targetPort: 29015 + name: rdb-intracluster-comm-port - port: 8080 targetPort: 8080 - name: bdb-rethinkdb-api + name: rdb-web-interface-port type: LoadBalancer --- apiVersion: apps/v1beta1 @@ -56,6 +63,10 @@ spec: value: "28015" - name: BIGCHAINDB_SERVER_BIND value: "0.0.0.0:9984" + - name: BIGCHAINDB_DATABASE_NAME + value: bigchain + - name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY + value: "120" imagePullPolicy: IfNotPresent ports: - containerPort: 9984 @@ -84,7 +95,11 @@ spec: ports: - containerPort: 8080 hostPort: 8080 - name: rdb-http-port + name: rdb-web-interface-port + protocol: TCP + - containerPort: 29015 + hostPort: 29015 + name: rdb-intra-port protocol: TCP - containerPort: 28015 hostPort: 28015 From 0ffa93cd441364df2c32a310adb2fa435a1bff97 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Sun, 5 Mar 2017 17:23:42 +0100 Subject: [PATCH 31/54] docs: fixed typo; changed 2Gi to 20Gi in kubectl cmd output --- .../source/cloud-deployment-templates/node-on-kubernetes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst index afb0b438..00b77bb8 100644 --- a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst +++ b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst @@ -154,6 +154,6 @@ but it should become "Bound" fairly quickly. Status: Bound Volume: pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21 Labels: - Capacity: 2Gi + Capacity: 20Gi Access Modes: RWO No events. 
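[Editor's note] The StatefulSet manifests above mount a PersistentVolumeClaim named `mongoclaim`, and the `kubectl describe pvc` output in the preceding patch shows that claim bound with a 20Gi capacity and RWO access mode. The claim itself is created in an earlier documentation step that is not part of these patches, so purely as an illustrative sketch (the name, namespace, capacity, and access mode are inferred from the surrounding output, not copied from the repository), such a claim could look like the following:

```yaml
# Illustrative sketch only: a PersistentVolumeClaim matching the name,
# capacity and access mode seen in the kubectl output above
# (mongoclaim, 20Gi, ReadWriteOnce). The real manifest shipped with the
# deployment templates may differ.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongoclaim
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce        # RWO, as reported by `kubectl describe pvc`
  resources:
    requests:
      storage: 20Gi        # capacity shown in the describe output
```

Applying a manifest like this (for example with `kubectl apply -f mongoclaim.yaml`) would leave the claim in the "Bound" state referenced by the StatefulSets' `claimName: mongoclaim` volumes.
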
From 33deff8760f19920e068fa6256d313f67a443ec7 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Sun, 5 Mar 2017 17:49:00 +0100 Subject: [PATCH 32/54] docs: add instructions to deploy MongoDB & BigchainDB on k8s cluster --- .../node-on-kubernetes.rst | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst index 00b77bb8..1a8e5deb 100644 --- a/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst +++ b/docs/server/source/cloud-deployment-templates/node-on-kubernetes.rst @@ -157,3 +157,38 @@ but it should become "Bound" fairly quickly. Capacity: 20Gi Access Modes: RWO No events. + + +Step 5: Deploy MongoDB & BigchainDB +----------------------------------- + +Now you can deploy MongoDB and BigchainDB to your Kubernetes cluster. +Currently, the way we do that is we create a StatefulSet with two +containers: BigchainDB and MongoDB. (In the future, we'll put them +in separate pods, and we'll ensure those pods are in different nodes.) +We expose BigchainDB's port 9984 (the HTTP API port) +and MongoDB's port 27017 using a Kubernetes Service. + +Get the file ``node-mdb-ss.yaml`` from GitHub using: + +.. code:: bash + + $ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/node-mdb-ss.yaml + +Take a look inside that file to see how it defines the Service +and the StatefulSet. +Note how the MongoDB container uses the ``mongoclaim`` PersistentVolumeClaim +for its ``/data`` diretory (mount path). + +Create the StatefulSet and Service in your cluster using: + +.. code:: bash + + $ kubectl apply -f node-mdb-ss.yaml + +You can check that they're working using: + +.. code:: bash + + $ kubectl get services + $ kubectl get statefulsets From 1c03ab754c4ea4c9326b812548aa4d7c81ac0d36 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Mon, 6 Mar 2017 11:58:07 +0100 Subject: [PATCH 33/54] Update change log for 0.9.3 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 62dac89c..75337b19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,12 @@ For reference, the possible headings are: * **External Contributors** to list contributors outside of BigchainDB GmbH. * **Notes** +## [0.9.3] - 2017-03-06 +Tag name: v0.9.3 + +### Fixed +Fixed HTTP API 500 error on `GET /outputs: issues #1200 and #1231. + ## [0.9.2] - 2017-03-02 Tag name: v0.9.2 From 8bad32dc13bd295a2e0b45e695fc555679ec506f Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Mon, 6 Mar 2017 13:38:34 +0100 Subject: [PATCH 34/54] Fix typo [skip ci] --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75337b19..538d2ccc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,7 +19,7 @@ For reference, the possible headings are: Tag name: v0.9.3 ### Fixed -Fixed HTTP API 500 error on `GET /outputs: issues #1200 and #1231. +Fixed HTTP API 500 error on `GET /outputs`: issues #1200 and #1231. 
## [0.9.2] - 2017-03-02 Tag name: v0.9.2 From 1374f133f9a7b5b34c105e1572643e5f38ccfbcd Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Mon, 6 Mar 2017 14:18:48 +0100 Subject: [PATCH 35/54] remove stray print call --- bigchaindb/backend/mongodb/connection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bigchaindb/backend/mongodb/connection.py b/bigchaindb/backend/mongodb/connection.py index 271d0e8e..8688e243 100644 --- a/bigchaindb/backend/mongodb/connection.py +++ b/bigchaindb/backend/mongodb/connection.py @@ -128,7 +128,6 @@ def _check_replica_set(conn): replSet option. """ options = conn.admin.command('getCmdLineOpts') - print(options) try: repl_opts = options['parsed']['replication'] repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet')) From 6ce8ba9ae3e2f0fcc82aa49bff4be5e8380d6288 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Wed, 22 Feb 2017 17:28:52 +0100 Subject: [PATCH 36/54] Replace logging statements with print that is more or less the recommended approach for command line interfaces see https://docs.python.org/3.6/howto/logging.html#when-to-use-logging --- bigchaindb/commands/bigchain.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index c118f857..69270f7d 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -122,7 +122,7 @@ def run_configure(args, skip_if_exists=False): def run_export_my_pubkey(args): """Export this node's public key to standard output """ - logger.debug('bigchaindb args = {}'.format(args)) + print('bigchaindb args = {}'.format(args)) bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) pubkey = bigchaindb.config['keypair']['public'] if pubkey is not None: @@ -141,9 +141,9 @@ def _run_init(): schema.init_database(connection=b.connection) - logger.info('Create genesis block.') + print('Create genesis block.') b.create_genesis_block() - logger.info('Done, have fun!') + print('Done, have fun!') def run_init(args): @@ -176,8 +176,7 @@ def run_drop(args): def run_start(args): """Start the processes to run the node""" - logger.info('BigchainDB Version {}'.format(bigchaindb.__version__)) - + print('BigchainDB Version {}'.format(bigchaindb.__version__)) bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) if args.allow_temp_keypair: @@ -229,7 +228,7 @@ def _run_load(tx_left, stats): def run_load(args): bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) - logger.info('Starting %s processes', args.multiprocess) + print('Starting %s processes', args.multiprocess) stats = logstats.Logstats() logstats.thread.start(stats) @@ -248,7 +247,7 @@ def run_set_shards(args): try: set_shards(conn, shards=args.num_shards) except OperationError as e: - logger.warn(e) + print(e) def run_set_replicas(args): @@ -256,7 +255,7 @@ def run_set_replicas(args): try: set_replicas(conn, replicas=args.num_replicas) except OperationError as e: - logger.warn(e) + print(e) def run_add_replicas(args): @@ -267,9 +266,9 @@ def run_add_replicas(args): try: add_replicas(conn, args.replicas) except (OperationError, NotImplementedError) as e: - logger.warn(e) + print(e) else: - logger.info('Added {} to the replicaset.'.format(args.replicas)) + print('Added {} to the replicaset.'.format(args.replicas)) def run_remove_replicas(args): @@ -280,9 +279,9 @@ def run_remove_replicas(args): try: remove_replicas(conn, args.replicas) except (OperationError, NotImplementedError) as e: 
- logger.warn(e) + print(e) else: - logger.info('Removed {} from the replicaset.'.format(args.replicas)) + print('Removed {} from the replicaset.'.format(args.replicas)) def create_parser(): From c1cf79d0e025cc6cae9029d9a0974915b858663a Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Wed, 22 Feb 2017 20:22:12 +0100 Subject: [PATCH 37/54] Pass a message to the exception otherwise nothing gets printed when printing the exception --- bigchaindb/backend/rethinkdb/admin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigchaindb/backend/rethinkdb/admin.py b/bigchaindb/backend/rethinkdb/admin.py index 23b55048..863ffb31 100644 --- a/bigchaindb/backend/rethinkdb/admin.py +++ b/bigchaindb/backend/rethinkdb/admin.py @@ -96,7 +96,7 @@ def reconfigure(connection, *, table, shards, replicas, try: return connection.run(r.table(table).reconfigure(**params)) except (r.ReqlOpFailedError, r.ReqlQueryLogicError) as e: - raise OperationError from e + raise OperationError('Failed to reconfigure tables.') from e @register_admin(RethinkDBConnection) From 10026680825a70dbdf719827bcf4476afdd03ae6 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Wed, 22 Feb 2017 20:23:00 +0100 Subject: [PATCH 38/54] Fix broken tests --- tests/commands/rethinkdb/test_commands.py | 14 ++++++++------ tests/commands/test_commands.py | 6 ++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/commands/rethinkdb/test_commands.py b/tests/commands/rethinkdb/test_commands.py index 5fb75f4d..5fc0379e 100644 --- a/tests/commands/rethinkdb/test_commands.py +++ b/tests/commands/rethinkdb/test_commands.py @@ -59,8 +59,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b): mock_reconfigure.assert_called_with(replicas=3, shards=3, dry_run=False) -@patch('logging.Logger.warn') -def test_set_shards_raises_exception(mock_log, monkeypatch, b): +def test_set_shards_raises_exception(monkeypatch, b, capsys): from bigchaindb.commands.bigchain import run_set_shards # test that we are correctly catching the exception @@ -76,7 +75,9 @@ def test_set_shards_raises_exception(mock_log, monkeypatch, b): args = Namespace(num_shards=3) run_set_shards(args) - assert mock_log.called + out, err = capsys.readouterr() + assert out[:-1] == 'Failed to reconfigure tables.' + assert not err @patch('rethinkdb.ast.Table.reconfigure') @@ -103,8 +104,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b): mock_reconfigure.assert_called_with(replicas=2, shards=3, dry_run=False) -@patch('logging.Logger.warn') -def test_set_replicas_raises_exception(mock_log, monkeypatch, b): +def test_set_replicas_raises_exception(monkeypatch, b, capsys): from bigchaindb.commands.bigchain import run_set_replicas # test that we are correctly catching the exception @@ -120,4 +120,6 @@ def test_set_replicas_raises_exception(mock_log, monkeypatch, b): args = Namespace(num_replicas=2) run_set_replicas(args) - assert mock_log.called + out, err = capsys.readouterr() + assert out[:-1] == 'Failed to reconfigure tables.' 
+ assert not err diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index f806eb7c..5b206762 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -132,8 +132,10 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): _, _ = capsys.readouterr() # has the effect of clearing capsys run_export_my_pubkey(args) out, err = capsys.readouterr() - assert out == config['keypair']['public'] + '\n' - assert out == 'Charlie_Bucket\n' + lines = out.splitlines() + assert config['keypair']['public'] in lines + assert 'Charlie_Bucket' in lines + assert 'bigchaindb args = {}'.format(args) in lines def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch): From 894784b9e412e984af020b763251f1a59659a6f2 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 28 Feb 2017 17:24:24 +0100 Subject: [PATCH 39/54] Add pytest-mock - a thin wrapper around mock --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 1ff6bdbb..dadd7385 100644 --- a/setup.py +++ b/setup.py @@ -50,6 +50,7 @@ tests_require = [ 'pytest>=3.0.0', 'pytest-catchlog>=1.2.2', 'pytest-cov>=2.2.1', + 'pytest-mock', 'pytest-xdist', 'pytest-flask', 'tox', From 4e82615845b834dc587826068a4c82389cdb052b Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 28 Feb 2017 17:26:28 +0100 Subject: [PATCH 40/54] Add module to hold messages used in commands --- bigchaindb/commands/messages.py | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 bigchaindb/commands/messages.py diff --git a/bigchaindb/commands/messages.py b/bigchaindb/commands/messages.py new file mode 100644 index 00000000..c65fe973 --- /dev/null +++ b/bigchaindb/commands/messages.py @@ -0,0 +1,10 @@ +"""Module to store messages used in commands, such as error messages, +warnings, prompts, etc. + +""" +CANNOT_START_KEYPAIR_NOT_FOUND = ( + "Can't start BigchainDB, no keypair found. " + 'Did you run `bigchaindb configure`?' +) + +RETHINKDB_STARTUP_ERROR = 'Error starting RethinkDB, reason is: {}' From d1d994f0e76df8c5e666f79176531b4697753509 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 28 Feb 2017 17:27:42 +0100 Subject: [PATCH 41/54] Add fixture for run_start cmdline args Note: has the possibility to be parametrized. 
--- tests/commands/conftest.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/commands/conftest.py b/tests/commands/conftest.py index 1cffbc2f..fde478b5 100644 --- a/tests/commands/conftest.py +++ b/tests/commands/conftest.py @@ -1,3 +1,5 @@ +from argparse import Namespace + import pytest @@ -38,3 +40,13 @@ def mock_bigchaindb_backup_config(monkeypatch): 'backlog_reassign_delay': 5 } monkeypatch.setattr('bigchaindb._config', config) + + +@pytest.fixture +def run_start_args(request): + param = getattr(request, 'param', {}) + return Namespace( + config=param.get('config'), + start_rethinkdb=param.get('start_rethinkdb', False), + allow_temp_keypair=param.get('allow_temp_keypair', False), + ) From 24eb18fb59daf19b6db4b7fa219ab992c1056722 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 28 Feb 2017 17:30:44 +0100 Subject: [PATCH 42/54] Add tests for errors on startup --- tests/commands/test_commands.py | 58 +++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 5b206762..9d071131 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -304,6 +304,64 @@ def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair, assert bigchaindb.config['keypair']['public'] == original_public_key +def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args): + from bigchaindb.commands.bigchain import run_start + from bigchaindb.common.exceptions import DatabaseAlreadyExists + mocked_start = mocker.patch('bigchaindb.processes.start') + + def mock_run_init(): + raise DatabaseAlreadyExists() + + monkeypatch.setattr( + 'bigchaindb.commands.bigchain._run_init', mock_run_init) + run_start(run_start_args) + assert mocked_start.called + + +def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args): + from bigchaindb.commands.bigchain import run_start + from bigchaindb.commands.messages import CANNOT_START_KEYPAIR_NOT_FOUND + from bigchaindb.common.exceptions import KeypairNotFoundException + mocked_start = mocker.patch('bigchaindb.processes.start') + + def mock_run_init(): + raise KeypairNotFoundException() + + monkeypatch.setattr( + 'bigchaindb.commands.bigchain._run_init', mock_run_init) + + with pytest.raises(SystemExit) as exc: + run_start(run_start_args) + + assert len(exc.value.args) == 1 + assert exc.value.args[0] == CANNOT_START_KEYPAIR_NOT_FOUND + assert not mocked_start.called + + +def test_run_start_when_start_rethinkdb_fails(mocker, + monkeypatch, + run_start_args): + from bigchaindb.commands.bigchain import run_start + from bigchaindb.commands.messages import RETHINKDB_STARTUP_ERROR + from bigchaindb.common.exceptions import StartupError + run_start_args.start_rethinkdb = True + mocked_start = mocker.patch('bigchaindb.processes.start') + err_msg = 'Error starting rethinkdb.' 
+ + def mock_start_rethinkdb(): + raise StartupError(err_msg) + + monkeypatch.setattr( + 'bigchaindb.commands.utils.start_rethinkdb', mock_start_rethinkdb) + + with pytest.raises(SystemExit) as exc: + run_start(run_start_args) + + assert len(exc.value.args) == 1 + assert exc.value.args[0] == RETHINKDB_STARTUP_ERROR.format(err_msg) + assert not mocked_start.called + + @patch('argparse.ArgumentParser.parse_args') @patch('bigchaindb.commands.utils.base_parser') @patch('bigchaindb.commands.utils.start') From edc5887b42510c3e049a96bba1146e619245b04e Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Tue, 28 Feb 2017 17:31:19 +0100 Subject: [PATCH 43/54] Use hardcoded (constant) messages This provides a bit of re-usability and helps testing. --- bigchaindb/commands/bigchain.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 69270f7d..69124661 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -26,6 +26,10 @@ from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas, remove_replicas) from bigchaindb.backend.exceptions import OperationError from bigchaindb.commands import utils +from bigchaindb.commands.messages import ( + CANNOT_START_KEYPAIR_NOT_FOUND, + RETHINKDB_STARTUP_ERROR, +) from bigchaindb import processes @@ -193,7 +197,7 @@ def run_start(args): try: proc = utils.start_rethinkdb() except StartupError as e: - sys.exit('Error starting RethinkDB, reason is: {}'.format(e)) + sys.exit(RETHINKDB_STARTUP_ERROR.format(e)) logger.info('RethinkDB started with PID %s' % proc.pid) try: @@ -201,8 +205,7 @@ def run_start(args): except DatabaseAlreadyExists: pass except KeypairNotFoundException: - sys.exit("Can't start BigchainDB, no keypair found. 
" - 'Did you run `bigchaindb configure`?') + sys.exit(CANNOT_START_KEYPAIR_NOT_FOUND) logger.info('Starting BigchainDB main process with public key %s', bigchaindb.config['keypair']['public']) From 2e398f606f666bd9fa4f6285d46bf71cf635375f Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Thu, 2 Mar 2017 14:36:52 +0100 Subject: [PATCH 44/54] Make some improvements to command line messages and error handling --- bigchaindb/commands/bigchain.py | 16 +++++++-------- tests/commands/rethinkdb/test_commands.py | 20 ++++++++---------- tests/commands/test_commands.py | 25 +++++++++++++++-------- 3 files changed, 31 insertions(+), 30 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 69124661..6b8bdb07 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -126,7 +126,6 @@ def run_configure(args, skip_if_exists=False): def run_export_my_pubkey(args): """Export this node's public key to standard output """ - print('bigchaindb args = {}'.format(args)) bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) pubkey = bigchaindb.config['keypair']['public'] if pubkey is not None: @@ -145,9 +144,8 @@ def _run_init(): schema.init_database(connection=b.connection) - print('Create genesis block.') b.create_genesis_block() - print('Done, have fun!') + logger.info('Genesis block created.') def run_init(args): @@ -180,7 +178,7 @@ def run_drop(args): def run_start(args): """Start the processes to run the node""" - print('BigchainDB Version {}'.format(bigchaindb.__version__)) + logger.info('BigchainDB Version %s', bigchaindb.__version__) bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) if args.allow_temp_keypair: @@ -231,7 +229,7 @@ def _run_load(tx_left, stats): def run_load(args): bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) - print('Starting %s processes', args.multiprocess) + logger.info('Starting %s processes', args.multiprocess) stats = logstats.Logstats() logstats.thread.start(stats) @@ -250,7 +248,7 @@ def run_set_shards(args): try: set_shards(conn, shards=args.num_shards) except OperationError as e: - print(e) + sys.exit(str(e)) def run_set_replicas(args): @@ -258,7 +256,7 @@ def run_set_replicas(args): try: set_replicas(conn, replicas=args.num_replicas) except OperationError as e: - print(e) + sys.exit(str(e)) def run_add_replicas(args): @@ -269,7 +267,7 @@ def run_add_replicas(args): try: add_replicas(conn, args.replicas) except (OperationError, NotImplementedError) as e: - print(e) + sys.exit(str(e)) else: print('Added {} to the replicaset.'.format(args.replicas)) @@ -282,7 +280,7 @@ def run_remove_replicas(args): try: remove_replicas(conn, args.replicas) except (OperationError, NotImplementedError) as e: - print(e) + sys.exit(str(e)) else: print('Removed {} from the replicaset.'.format(args.replicas)) diff --git a/tests/commands/rethinkdb/test_commands.py b/tests/commands/rethinkdb/test_commands.py index 5fc0379e..5208587e 100644 --- a/tests/commands/rethinkdb/test_commands.py +++ b/tests/commands/rethinkdb/test_commands.py @@ -59,7 +59,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b): mock_reconfigure.assert_called_with(replicas=3, shards=3, dry_run=False) -def test_set_shards_raises_exception(monkeypatch, b, capsys): +def test_set_shards_raises_exception(monkeypatch, b): from bigchaindb.commands.bigchain import run_set_shards # test that we are correctly catching the exception @@ -73,11 +73,9 @@ def 
test_set_shards_raises_exception(monkeypatch, b, capsys): monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise) args = Namespace(num_shards=3) - run_set_shards(args) - - out, err = capsys.readouterr() - assert out[:-1] == 'Failed to reconfigure tables.' - assert not err + with pytest.raises(SystemExit) as exc: + run_set_shards(args) + assert exc.value.args == ('Failed to reconfigure tables.',) @patch('rethinkdb.ast.Table.reconfigure') @@ -104,7 +102,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b): mock_reconfigure.assert_called_with(replicas=2, shards=3, dry_run=False) -def test_set_replicas_raises_exception(monkeypatch, b, capsys): +def test_set_replicas_raises_exception(monkeypatch, b): from bigchaindb.commands.bigchain import run_set_replicas # test that we are correctly catching the exception @@ -118,8 +116,6 @@ def test_set_replicas_raises_exception(monkeypatch, b, capsys): monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise) args = Namespace(num_replicas=2) - run_set_replicas(args) - - out, err = capsys.readouterr() - assert out[:-1] == 'Failed to reconfigure tables.' - assert not err + with pytest.raises(SystemExit) as exc: + run_set_replicas(args) + assert exc.value.args == ('Failed to reconfigure tables.',) diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 9d071131..991f4961 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -135,7 +135,6 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): lines = out.splitlines() assert config['keypair']['public'] in lines assert 'Charlie_Bucket' in lines - assert 'bigchaindb args = {}'.format(args) in lines def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch): @@ -455,14 +454,18 @@ def test_run_add_replicas(mock_add_replicas): mock_add_replicas.reset_mock() # test add_replicas with `OperationError` - mock_add_replicas.side_effect = OperationError() - assert run_add_replicas(args) is None + mock_add_replicas.side_effect = OperationError('err') + with pytest.raises(SystemExit) as exc: + run_add_replicas(args) + assert exc.value.args == ('err',) assert mock_add_replicas.call_count == 1 mock_add_replicas.reset_mock() # test add_replicas with `NotImplementedError` - mock_add_replicas.side_effect = NotImplementedError() - assert run_add_replicas(args) is None + mock_add_replicas.side_effect = NotImplementedError('err') + with pytest.raises(SystemExit) as exc: + run_add_replicas(args) + assert exc.value.args == ('err',) assert mock_add_replicas.call_count == 1 mock_add_replicas.reset_mock() @@ -482,14 +485,18 @@ def test_run_remove_replicas(mock_remove_replicas): mock_remove_replicas.reset_mock() # test add_replicas with `OperationError` - mock_remove_replicas.side_effect = OperationError() - assert run_remove_replicas(args) is None + mock_remove_replicas.side_effect = OperationError('err') + with pytest.raises(SystemExit) as exc: + run_remove_replicas(args) + assert exc.value.args == ('err',) assert mock_remove_replicas.call_count == 1 mock_remove_replicas.reset_mock() # test add_replicas with `NotImplementedError` - mock_remove_replicas.side_effect = NotImplementedError() - assert run_remove_replicas(args) is None + mock_remove_replicas.side_effect = NotImplementedError('err') + with pytest.raises(SystemExit) as exc: + run_remove_replicas(args) + assert exc.value.args == ('err',) assert mock_remove_replicas.call_count == 1 mock_remove_replicas.reset_mock() From 7e0e46e8205d6e96efdb1838400fb52b9c202d56 
Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Mon, 6 Mar 2017 15:53:48 +0100 Subject: [PATCH 45/54] Pass host and port to rethinkdb connection This is needed when running the tests in containers for instance --- tests/backend/rethinkdb/test_connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/backend/rethinkdb/test_connection.py b/tests/backend/rethinkdb/test_connection.py index 880862af..df393716 100644 --- a/tests/backend/rethinkdb/test_connection.py +++ b/tests/backend/rethinkdb/test_connection.py @@ -46,7 +46,7 @@ def test_raise_exception_when_max_tries(): conn.run(MockQuery()) -def test_reconnect_when_connection_lost(): +def test_reconnect_when_connection_lost(db_host, db_port): from bigchaindb.backend import connect original_connect = r.connect @@ -54,7 +54,7 @@ def test_reconnect_when_connection_lost(): with patch('rethinkdb.connect') as mock_connect: mock_connect.side_effect = [ r.ReqlDriverError('mock'), - original_connect() + original_connect(host=db_host, port=db_port) ] conn = connect() From 9bb2ec276a3bd521a6cf9301972fab8145b329bb Mon Sep 17 00:00:00 2001 From: Scott Sadler Date: Tue, 7 Mar 2017 11:24:21 +0100 Subject: [PATCH 46/54] clarify interface of Voting.count_votes --- bigchaindb/voting.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bigchaindb/voting.py b/bigchaindb/voting.py index b4e8a9e9..57a0192b 100644 --- a/bigchaindb/voting.py +++ b/bigchaindb/voting.py @@ -68,6 +68,12 @@ class Voting: * Detect if there are multiple votes from a single node and return them in a separate "cheat" dictionary. * Votes must agree on previous block, otherwise they become invalid. + + note: + The sum of votes returned by this function does not neccesarily + equal the length of the list of votes fed in. It may differ for + example if there are found to be multiple votes submitted by a + single voter. """ prev_blocks = collections.Counter() cheat = [] From 083d1678ced0c6fd7b5c3ce58a77eee451ad1482 Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Tue, 7 Mar 2017 11:24:26 +0100 Subject: [PATCH 47/54] Must update meaning of 'latest' image on Docker Hub when do a minor release --- Release_Process.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Release_Process.md b/Release_Process.md index a4e3d427..ec51ceaf 100644 --- a/Release_Process.md +++ b/Release_Process.md @@ -14,10 +14,8 @@ A minor release is preceeded by a feature freeze and created from the 'master' b 1. In `bigchaindb/version.py`, update `__version__` and `__short_version__`, e.g. to `0.9` and `0.9.0` (with no `.dev` on the end) 1. Commit that change, and push the new branch to GitHub 1. Follow steps outlined in [Common Steps](#common-steps) -1. In 'master' branch, Edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev' -This is so people reading the latest docs will know that they're for the latest (master branch) -version of BigchainDB Server, not the docs at the time of the most recent release (which are also -available). +1. In 'master' branch, Edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev`. This is so people reading the latest docs will know that they're for the latest (master branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available). +1. 
Go to [Docker Hub](https://hub.docker.com/), sign in, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9` Congratulations, you have released BigchainDB! From 33bef7d99305cf356cf8f8d6179af37ada6cb792 Mon Sep 17 00:00:00 2001 From: libscott Date: Tue, 7 Mar 2017 11:29:16 +0100 Subject: [PATCH 48/54] I can't spell neccesarily --- bigchaindb/voting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigchaindb/voting.py b/bigchaindb/voting.py index 57a0192b..b12bc4df 100644 --- a/bigchaindb/voting.py +++ b/bigchaindb/voting.py @@ -70,7 +70,7 @@ class Voting: * Votes must agree on previous block, otherwise they become invalid. note: - The sum of votes returned by this function does not neccesarily + The sum of votes returned by this function does not necessarily equal the length of the list of votes fed in. It may differ for example if there are found to be multiple votes submitted by a single voter. From 0ad0d89fcd45267d64c592842178028bf7dc4268 Mon Sep 17 00:00:00 2001 From: Brett Sun Date: Fri, 24 Feb 2017 11:05:28 +0100 Subject: [PATCH 49/54] Move `input_on_stderr()` to commands.utils module --- bigchaindb/commands/bigchain.py | 15 +++------------ bigchaindb/commands/utils.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 6b8bdb07..667ceba8 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -3,12 +3,11 @@ the command-line interface (CLI) for BigchainDB Server. """ import os -import sys import logging import argparse import copy import json -import builtins +import sys import logstats @@ -20,7 +19,7 @@ import bigchaindb import bigchaindb.config_utils from bigchaindb.models import Transaction from bigchaindb.utils import ProcessGroup -from bigchaindb import backend +from bigchaindb import backend, processes from bigchaindb.backend import schema from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas, remove_replicas) @@ -30,21 +29,13 @@ from bigchaindb.commands.messages import ( CANNOT_START_KEYPAIR_NOT_FOUND, RETHINKDB_STARTUP_ERROR, ) -from bigchaindb import processes +from bigchaindb.commands.utils import input_on_stderr logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -# We need this because `input` always prints on stdout, while it should print -# to stderr. It's a very old bug, check it out here: -# - https://bugs.python.org/issue1927 -def input_on_stderr(prompt=''): - print(prompt, end='', file=sys.stderr) - return builtins.input() - - def run_show_config(args): """Show the current configuration""" # TODO Proposal: remove the "hidden" configuration. Only show config. If diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index 80ee7a6b..d3bcbbd4 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -3,8 +3,10 @@ for ``argparse.ArgumentParser``. """ import argparse +import builtins import multiprocessing as mp import subprocess +import sys import rethinkdb as r from pymongo import uri_parser @@ -15,6 +17,14 @@ from bigchaindb.common.exceptions import StartupError from bigchaindb.version import __version__ +# We need this because `input` always prints on stdout, while it should print +# to stderr. 
It's a very old bug, check it out here: +# - https://bugs.python.org/issue1927 +def input_on_stderr(prompt=''): + print(prompt, end='', file=sys.stderr) + return builtins.input() + + def start_rethinkdb(): """Start RethinkDB as a child process and wait for it to be available. From fce6b6af522ca637002a93ffb36ccd8e53ef56cc Mon Sep 17 00:00:00 2001 From: Brett Sun Date: Fri, 24 Feb 2017 11:16:17 +0100 Subject: [PATCH 50/54] Standardize output streams for outputs of commands --- bigchaindb/commands/bigchain.py | 8 +++++++- tests/commands/test_commands.py | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 667ceba8..4134bb0c 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -36,6 +36,12 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) +# Note about printing: +# We try to print to stdout for results of a command that may be useful to +# someone (or another program). Strictly informational text, or errors, +# should be printed to stderr. + + def run_show_config(args): """Show the current configuration""" # TODO Proposal: remove the "hidden" configuration. Only show config. If @@ -84,7 +90,7 @@ def run_configure(args, skip_if_exists=False): # select the correct config defaults based on the backend print('Generating default configuration for backend {}' - .format(args.backend)) + .format(args.backend), file=sys.stderr) conf['database'] = bigchaindb._database_map[args.backend] if not args.yes: diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 991f4961..95587e6c 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -133,8 +133,10 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): run_export_my_pubkey(args) out, err = capsys.readouterr() lines = out.splitlines() + err_lines = err.splitlines() assert config['keypair']['public'] in lines assert 'Charlie_Bucket' in lines + assert 'bigchaindb args = {}'.format(args) in err_lines def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch): From df9fd6dc23cffbb78541d0cc2f888a9d8034e9cf Mon Sep 17 00:00:00 2001 From: Brett Sun Date: Fri, 24 Feb 2017 11:25:45 +0100 Subject: [PATCH 51/54] Move arguments related only to `start` command to be under `start` --- bigchaindb/commands/bigchain.py | 24 ++++++++++++------------ tests/commands/test_commands.py | 15 +++++++++------ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 4134bb0c..2d5c4201 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -287,16 +287,6 @@ def create_parser(): description='Control your BigchainDB node.', parents=[utils.base_parser]) - parser.add_argument('--dev-start-rethinkdb', - dest='start_rethinkdb', - action='store_true', - help='Run RethinkDB on start') - - parser.add_argument('--dev-allow-temp-keypair', - dest='allow_temp_keypair', - action='store_true', - help='Generate a random keypair on start') - # all the commands are contained in the subparsers object, # the command selected by the user will be stored in `args.command` # that is used by the `main` function to select which other @@ -328,8 +318,18 @@ def create_parser(): help='Drop the database') # parser for starting BigchainDB - subparsers.add_parser('start', - help='Start BigchainDB') + start_parser = subparsers.add_parser('start', + help='Start BigchainDB') + 
+ start_parser.add_argument('--dev-allow-temp-keypair', + dest='allow_temp_keypair', + action='store_true', + help='Generate a random keypair on start') + + start_parser.add_argument('--dev-start-rethinkdb', + dest='start_rethinkdb', + action='store_true', + help='Run RethinkDB on start') # parser for configuring the number of shards sharding_parser = subparsers.add_parser('set-shards', diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 95587e6c..cb9e7641 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -381,11 +381,6 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock, main() assert argparser_mock.called is True - assert parser.add_argument.called is True - parser.add_argument.assert_any_call('--dev-start-rethinkdb', - dest='start_rethinkdb', - action='store_true', - help='Run RethinkDB on start') parser.add_subparsers.assert_called_with(title='Commands', dest='command') subparsers.add_parser.assert_any_call('configure', @@ -399,11 +394,19 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock, 'key') subparsers.add_parser.assert_any_call('init', help='Init the database') subparsers.add_parser.assert_any_call('drop', help='Drop the database') + subparsers.add_parser.assert_any_call('start', help='Start BigchainDB') + subsubparsers.add_argument.assert_any_call('--dev-start-rethinkdb', + dest='start_rethinkdb', + action='store_true', + help='Run RethinkDB on start') + subsubparsers.add_argument.assert_any_call('--dev-allow-temp-keypair', + dest='allow_temp_keypair', + action='store_true', + help='Generate a random keypair on start') subparsers.add_parser.assert_any_call('set-shards', help='Configure number of shards') - subsubparsers.add_argument.assert_any_call('num_shards', metavar='num_shards', type=int, default=1, From 54ea18dd2badd08d18d42c2cc00a79b2b4f76960 Mon Sep 17 00:00:00 2001 From: Brett Sun Date: Fri, 24 Feb 2017 11:57:32 +0100 Subject: [PATCH 52/54] Use decorator to automatically configure before starting any command (that requires configuration) --- bigchaindb/commands/bigchain.py | 21 +++++++++++---------- bigchaindb/commands/utils.py | 11 +++++++++++ tests/commands/rethinkdb/test_commands.py | 8 ++++---- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/bigchaindb/commands/bigchain.py b/bigchaindb/commands/bigchain.py index 2d5c4201..efefa9d7 100644 --- a/bigchaindb/commands/bigchain.py +++ b/bigchaindb/commands/bigchain.py @@ -16,7 +16,6 @@ from bigchaindb.common.exceptions import (StartupError, DatabaseAlreadyExists, KeypairNotFoundException) import bigchaindb -import bigchaindb.config_utils from bigchaindb.models import Transaction from bigchaindb.utils import ProcessGroup from bigchaindb import backend, processes @@ -29,7 +28,7 @@ from bigchaindb.commands.messages import ( CANNOT_START_KEYPAIR_NOT_FOUND, RETHINKDB_STARTUP_ERROR, ) -from bigchaindb.commands.utils import input_on_stderr +from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr logging.basicConfig(level=logging.INFO) @@ -42,12 +41,12 @@ logger = logging.getLogger(__name__) # should be printed to stderr. +@configure_bigchaindb def run_show_config(args): """Show the current configuration""" # TODO Proposal: remove the "hidden" configuration. Only show config. If # the system needs to be configured, then display information on how to # configure the system. 
- bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) config = copy.deepcopy(bigchaindb.config) del config['CONFIGURED'] private_key = config['keypair']['private'] @@ -120,10 +119,10 @@ def run_configure(args, skip_if_exists=False): print('Ready to go!', file=sys.stderr) +@configure_bigchaindb def run_export_my_pubkey(args): """Export this node's public key to standard output """ - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) pubkey = bigchaindb.config['keypair']['public'] if pubkey is not None: print(pubkey) @@ -145,9 +144,9 @@ def _run_init(): logger.info('Genesis block created.') +@configure_bigchaindb def run_init(args): """Initialize the database""" - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) # TODO Provide mechanism to: # 1. prompt the user to inquire whether they wish to drop the db # 2. force the init, (e.g., via -f flag) @@ -158,9 +157,9 @@ def run_init(args): print('If you wish to re-initialize it, first drop it.', file=sys.stderr) +@configure_bigchaindb def run_drop(args): """Drop the database""" - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) dbname = bigchaindb.config['database']['name'] if not args.yes: @@ -173,10 +172,10 @@ def run_drop(args): schema.drop_database(conn, dbname) +@configure_bigchaindb def run_start(args): """Start the processes to run the node""" logger.info('BigchainDB Version %s', bigchaindb.__version__) - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) if args.allow_temp_keypair: if not (bigchaindb.config['keypair']['private'] or @@ -224,8 +223,8 @@ def _run_load(tx_left, stats): break +@configure_bigchaindb def run_load(args): - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) logger.info('Starting %s processes', args.multiprocess) stats = logstats.Logstats() logstats.thread.start(stats) @@ -240,6 +239,7 @@ def run_load(args): workers.start() +@configure_bigchaindb def run_set_shards(args): conn = backend.connect() try: @@ -248,6 +248,7 @@ def run_set_shards(args): sys.exit(str(e)) +@configure_bigchaindb def run_set_replicas(args): conn = backend.connect() try: @@ -256,9 +257,9 @@ def run_set_replicas(args): sys.exit(str(e)) +@configure_bigchaindb def run_add_replicas(args): # Note: This command is specific to MongoDB - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) conn = backend.connect() try: @@ -269,9 +270,9 @@ def run_add_replicas(args): print('Added {} to the replicaset.'.format(args.replicas)) +@configure_bigchaindb def run_remove_replicas(args): # Note: This command is specific to MongoDB - bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) conn = backend.connect() try: diff --git a/bigchaindb/commands/utils.py b/bigchaindb/commands/utils.py index d3bcbbd4..b04499d9 100644 --- a/bigchaindb/commands/utils.py +++ b/bigchaindb/commands/utils.py @@ -4,6 +4,7 @@ for ``argparse.ArgumentParser``. 
import argparse import builtins +import functools import multiprocessing as mp import subprocess import sys @@ -12,11 +13,21 @@ import rethinkdb as r from pymongo import uri_parser import bigchaindb +import bigchaindb.config_utils from bigchaindb import backend from bigchaindb.common.exceptions import StartupError from bigchaindb.version import __version__ +def configure_bigchaindb(command): + @functools.wraps(command) + def configure(args): + bigchaindb.config_utils.autoconfigure(filename=args.config, force=True) + command(args) + + return configure + + # We need this because `input` always prints on stdout, while it should print # to stderr. It's a very old bug, check it out here: # - https://bugs.python.org/issue1927 diff --git a/tests/commands/rethinkdb/test_commands.py b/tests/commands/rethinkdb/test_commands.py index 5208587e..f0ae1090 100644 --- a/tests/commands/rethinkdb/test_commands.py +++ b/tests/commands/rethinkdb/test_commands.py @@ -45,7 +45,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b): return {'shards': [{'replicas': [1]}]} monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica) - args = Namespace(num_shards=3) + args = Namespace(num_shards=3, config=None) run_set_shards(args) mock_reconfigure.assert_called_with(replicas=1, shards=3, dry_run=False) @@ -72,7 +72,7 @@ def test_set_shards_raises_exception(monkeypatch, b): monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica) monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise) - args = Namespace(num_shards=3) + args = Namespace(num_shards=3, config=None) with pytest.raises(SystemExit) as exc: run_set_shards(args) assert exc.value.args == ('Failed to reconfigure tables.',) @@ -88,7 +88,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b): return {'shards': [1, 2]} monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards) - args = Namespace(num_replicas=2) + args = Namespace(num_replicas=2, config=None) run_set_replicas(args) mock_reconfigure.assert_called_with(replicas=2, shards=2, dry_run=False) @@ -115,7 +115,7 @@ def test_set_replicas_raises_exception(monkeypatch, b): monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards) monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise) - args = Namespace(num_replicas=2) + args = Namespace(num_replicas=2, config=None) with pytest.raises(SystemExit) as exc: run_set_replicas(args) assert exc.value.args == ('Failed to reconfigure tables.',) From ffe0eb60b9d776fde13e201bc68b7b02b3e52913 Mon Sep 17 00:00:00 2001 From: Brett Sun Date: Fri, 24 Feb 2017 13:44:50 +0100 Subject: [PATCH 53/54] Move tests related to commands/tests.py to separate test file --- tests/commands/test_commands.py | 54 +--------------------------- tests/commands/test_utils.py | 63 +++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 53 deletions(-) create mode 100644 tests/commands/test_utils.py diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index cb9e7641..37bec6fa 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -1,6 +1,6 @@ import json from unittest.mock import Mock, patch -from argparse import Namespace, ArgumentTypeError +from argparse import Namespace import copy import pytest @@ -26,42 +26,6 @@ def test_make_sure_we_dont_remove_any_command(): assert parser.parse_args(['remove-replicas', 'localhost:27017']).command -def test_start_raises_if_command_not_implemented(): - from bigchaindb.commands.bigchain import utils - from 
bigchaindb.commands.bigchain import create_parser - - parser = create_parser() - - with pytest.raises(NotImplementedError): - # Will raise because `scope`, the third parameter, - # doesn't contain the function `run_start` - utils.start(parser, ['start'], {}) - - -def test_start_raises_if_no_arguments_given(): - from bigchaindb.commands.bigchain import utils - from bigchaindb.commands.bigchain import create_parser - - parser = create_parser() - - with pytest.raises(SystemExit): - utils.start(parser, [], {}) - - -@patch('multiprocessing.cpu_count', return_value=42) -def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count): - from bigchaindb.commands.bigchain import utils - from bigchaindb.commands.bigchain import create_parser - - def run_load(args): - return args - - parser = create_parser() - - assert utils.start(parser, ['load'], {'run_load': run_load}).multiprocess == 1 - assert utils.start(parser, ['load', '--multiprocess'], {'run_load': run_load}).multiprocess == 42 - - @patch('bigchaindb.commands.utils.start') def test_main_entrypoint(mock_start): from bigchaindb.commands.bigchain import main @@ -504,19 +468,3 @@ def test_run_remove_replicas(mock_remove_replicas): assert exc.value.args == ('err',) assert mock_remove_replicas.call_count == 1 mock_remove_replicas.reset_mock() - - -def test_mongodb_host_type(): - from bigchaindb.commands.utils import mongodb_host - - # bad port provided - with pytest.raises(ArgumentTypeError): - mongodb_host('localhost:11111111111') - - # no port information provided - with pytest.raises(ArgumentTypeError): - mongodb_host('localhost') - - # bad host provided - with pytest.raises(ArgumentTypeError): - mongodb_host(':27017') diff --git a/tests/commands/test_utils.py b/tests/commands/test_utils.py new file mode 100644 index 00000000..aadd24b5 --- /dev/null +++ b/tests/commands/test_utils.py @@ -0,0 +1,63 @@ +import argparse +import pytest + +from unittest.mock import patch + + +def test_start_raises_if_command_not_implemented(): + from bigchaindb.commands import utils + from bigchaindb.commands.bigchain import create_parser + + parser = create_parser() + + with pytest.raises(NotImplementedError): + # Will raise because `scope`, the third parameter, + # doesn't contain the function `run_start` + utils.start(parser, ['start'], {}) + + +def test_start_raises_if_no_arguments_given(): + from bigchaindb.commands import utils + from bigchaindb.commands.bigchain import create_parser + + parser = create_parser() + + with pytest.raises(SystemExit): + utils.start(parser, [], {}) + + +@patch('multiprocessing.cpu_count', return_value=42) +def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count): + from bigchaindb.commands import utils + + def run_mp_arg_test(args): + return args + + parser = argparse.ArgumentParser() + subparser = parser.add_subparsers(title='Commands', + dest='command') + mp_arg_test_parser = subparser.add_parser('mp_arg_test') + mp_arg_test_parser.add_argument('-m', '--multiprocess', + nargs='?', + type=int, + default=False) + + scope = {'run_mp_arg_test': run_mp_arg_test} + assert utils.start(parser, ['mp_arg_test'], scope).multiprocess == 1 + assert utils.start(parser, ['mp_arg_test', '--multiprocess'], scope).multiprocess == 42 + + +def test_mongodb_host_type(): + from bigchaindb.commands.utils import mongodb_host + + # bad port provided + with pytest.raises(argparse.ArgumentTypeError): + mongodb_host('localhost:11111111111') + + # no port information provided + with pytest.raises(argparse.ArgumentTypeError): + 
mongodb_host('localhost') + + # bad host provided + with pytest.raises(argparse.ArgumentTypeError): + mongodb_host(':27017') From 75d0a917d7bac8d16d98f058b42cb6821001eeb8 Mon Sep 17 00:00:00 2001 From: Sylvain Bellemare Date: Mon, 6 Mar 2017 15:25:50 +0100 Subject: [PATCH 54/54] Remove stderr check --- tests/commands/test_commands.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 37bec6fa..e10b3157 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -95,12 +95,10 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): monkeypatch.setitem(config['keypair'], 'public', 'Charlie_Bucket') _, _ = capsys.readouterr() # has the effect of clearing capsys run_export_my_pubkey(args) - out, err = capsys.readouterr() + out, _ = capsys.readouterr() lines = out.splitlines() - err_lines = err.splitlines() assert config['keypair']['public'] in lines assert 'Charlie_Bucket' in lines - assert 'bigchaindb args = {}'.format(args) in err_lines def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
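
The decorator introduced in PATCH 52 is the piece that ties these changes together: it wraps each command entry point so that bigchaindb.config_utils.autoconfigure() runs on args.config before the command body executes, which is also why the RethinkDB command tests above now build Namespace(..., config=None). The sketch below shows the same pattern in isolation and is illustrative only: autoconfigure() is replaced by a local stub and run_example() is a hypothetical command, so it runs without the bigchaindb package installed.

    import functools
    from argparse import Namespace


    def autoconfigure(filename=None, force=False):
        # Local stand-in for bigchaindb.config_utils.autoconfigure, present
        # only so this sketch is self-contained and runnable.
        print('loading configuration from {!r} (force={})'.format(filename, force))


    def configure_bigchaindb(command):
        """Decorate a command so configuration is loaded before it runs."""
        @functools.wraps(command)
        def configure(args):
            # The parsed args are expected to carry a `config` attribute,
            # exactly as the patched commands and tests assume.
            autoconfigure(filename=args.config, force=True)
            command(args)

        return configure


    @configure_bigchaindb
    def run_example(args):
        # Hypothetical command body; in the patch this role is played by
        # run_start, run_init, run_drop, and the other run_* functions.
        print('command body runs after configuration')


    if __name__ == '__main__':
        # Any object with a `config` attribute works; an argparse Namespace
        # mirrors what the CLI would pass in.
        run_example(Namespace(config=None))

As in the patch, the wrapper does not propagate the command's return value; that is a reasonable choice for CLI entry points that report through stdout and exit codes, but a caller that needed the result would have to return command(args) from the wrapper instead.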