
Merge remote-tracking branch 'origin/master' into voting-class-integration

Commit ffccf86c91 by Scott Sadler, 2017-03-08 12:54:33 +01:00
42 changed files with 1082 additions and 379 deletions


@ -15,6 +15,20 @@ For reference, the possible headings are:
* **External Contributors** to list contributors outside of BigchainDB GmbH.
* **Notes**
## [0.9.3] - 2017-03-06
Tag name: v0.9.3
### Fixed
Fixed HTTP API 500 error on `GET /outputs`: issues #1200 and #1231.
## [0.9.2] - 2017-03-02
Tag name: v0.9.2
### Fixed
Pinned the `python-rapidjson` library in `setup.py` to prevent `bigchaindb`'s
installation from failing due to
https://github.com/python-rapidjson/python-rapidjson/issues/62.
## [0.9.1] - 2017-02-06
Tag name: v0.9.1


@ -2,6 +2,8 @@
There are many ways you can contribute to the BigchainDB project, some very easy and others more involved. We want to be friendly and welcoming to all potential contributors, so we ask that everyone involved abide by some simple guidelines outlined in our [Code of Conduct](./CODE_OF_CONDUCT.md).
Or, are you interested in contributing full-time? BigchainDB is hiring. See [here](https://github.com/bigchaindb/org/blob/master/engjob.md).
## Easy Ways to Contribute
The BigchainDB community has a Google Group and a Gitter chatroom. Our [Community page](https://www.bigchaindb.com/community) has more information about those.


@ -14,10 +14,8 @@ A minor release is preceded by a feature freeze and created from the 'master' b
1. In `bigchaindb/version.py`, update `__version__` and `__short_version__`, e.g. to `0.9` and `0.9.0` (with no `.dev` on the end)
1. Commit that change, and push the new branch to GitHub
1. Follow steps outlined in [Common Steps](#common-steps)
1. In 'master' branch, Edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev'
This is so people reading the latest docs will know that they're for the latest (master branch)
version of BigchainDB Server, not the docs at the time of the most recent release (which are also
available).
1. In 'master' branch, Edit `bigchaindb/version.py`, increment the minor version to the next planned release, e.g. `0.10.0.dev`. This is so people reading the latest docs will know that they're for the latest (master branch) version of BigchainDB Server, not the docs at the time of the most recent release (which are also available).
1. Go to [Docker Hub](https://hub.docker.com/), sign in, go to Settings - Build Settings, and under the build with Docker Tag Name equal to `latest`, change the Name to the number of the new release, e.g. `0.9`
Congratulations, you have released BigchainDB!


@ -10,6 +10,8 @@ _database_rethinkdb = {
'host': os.environ.get('BIGCHAINDB_DATABASE_HOST', 'localhost'),
'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 28015)),
'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
'connection_timeout': 5000,
'max_tries': 3,
}
_database_mongodb = {
@ -18,6 +20,8 @@ _database_mongodb = {
'port': int(os.environ.get('BIGCHAINDB_DATABASE_PORT', 27017)),
'name': os.environ.get('BIGCHAINDB_DATABASE_NAME', 'bigchain'),
'replicaset': os.environ.get('BIGCHAINDB_DATABASE_REPLICASET', 'bigchain-rs'),
'connection_timeout': 5000,
'max_tries': 3,
}
_database_map = {

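The two new keys are plain defaults with no dedicated environment-variable override (unlike host, port and name), so after import or autoconfiguration they are simply readable from `bigchaindb.config` and picked up by the `Connection` base class shown in the next diff. A minimal sketch of what a consumer sees, assuming the defaults above are in effect:

```python
# Minimal sketch (not part of the diff): the new database settings as they
# surface in bigchaindb.config. Values assume the defaults defined above.
import bigchaindb

dbconf = bigchaindb.config['database']
print(dbconf['connection_timeout'])  # 5000 (milliseconds)
print(dbconf['max_tries'])           # 3; a value of 0 means "retry forever"
```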

@ -1,8 +1,10 @@
from itertools import repeat
from importlib import import_module
import logging
import bigchaindb
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import ConnectionError
BACKENDS = {
@ -13,7 +15,8 @@ BACKENDS = {
logger = logging.getLogger(__name__)
def connect(backend=None, host=None, port=None, name=None, replicaset=None):
def connect(backend=None, host=None, port=None, name=None, max_tries=None,
connection_timeout=None, replicaset=None):
"""Create a new connection to the database backend.
All arguments default to the current configuration's values if not
@ -58,7 +61,9 @@ def connect(backend=None, host=None, port=None, name=None, replicaset=None):
raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc
logger.debug('Connection: {}'.format(Class))
return Class(host, port, dbname, replicaset=replicaset)
return Class(host=host, port=port, dbname=dbname,
max_tries=max_tries, connection_timeout=connection_timeout,
replicaset=replicaset)
class Connection:
@ -68,17 +73,41 @@ class Connection:
from and implements this class.
"""
def __init__(self, host=None, port=None, dbname=None, *args, **kwargs):
def __init__(self, host=None, port=None, dbname=None,
connection_timeout=None, max_tries=None,
**kwargs):
"""Create a new :class:`~.Connection` instance.
Args:
host (str): the host to connect to.
port (int): the port to connect to.
dbname (str): the name of the database to use.
connection_timeout (int, optional): the milliseconds to wait
until timing out the database connection attempt.
Defaults to 5000ms.
max_tries (int, optional): how many tries before giving up,
if 0 then try forever. Defaults to 3.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
dbconf = bigchaindb.config['database']
self.host = host or dbconf['host']
self.port = port or dbconf['port']
self.dbname = dbname or dbconf['name']
self.connection_timeout = connection_timeout if connection_timeout is not None\
else dbconf['connection_timeout']
self.max_tries = max_tries if max_tries is not None else dbconf['max_tries']
self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
self._conn = None
@property
def conn(self):
if self._conn is None:
self.connect()
return self._conn
def run(self, query):
"""Run a query.
@ -94,3 +123,26 @@ class Connection:
"""
raise NotImplementedError()
def connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
"""
attempt = 0
for i in self.max_tries_counter:
attempt += 1
try:
self._conn = self._connect()
except ConnectionError as exc:
logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
attempt, self.max_tries if self.max_tries != 0 else '',
self.host, self.port, self.connection_timeout)
if attempt == self.max_tries:
logger.critical('Cannot connect to the Database. Giving up.')
raise ConnectionError() from exc
else:
break
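Because the retry loop now lives in the base class, every backend gets the same behaviour and both knobs can be passed straight through `connect()`. A usage sketch, assuming a backend configured as usual and reachable on the configured host and port:

```python
# Sketch only: exercising the new connect() keyword arguments. Omitted
# arguments fall back to bigchaindb.config['database'].
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError

# Build the connection object; nothing is opened yet.
conn = connect(max_tries=3, connection_timeout=5000)

try:
    # Retries up to max_tries times, waiting connection_timeout ms per attempt.
    conn.connect()
except ConnectionError:
    print('Database unreachable, giving up.')
```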


@ -1,6 +1,5 @@
import time
import logging
from itertools import repeat
import pymongo
@ -15,46 +14,20 @@ from bigchaindb.backend.connection import Connection
logger = logging.getLogger(__name__)
# TODO: waiting for #1082 to be merged
# to move this constants in the configuration.
CONNECTION_TIMEOUT = 4000 # in milliseconds
MAX_RETRIES = 3 # number of tries before giving up, if 0 then try forever
class MongoDBConnection(Connection):
def __init__(self, host=None, port=None, dbname=None,
connection_timeout=None, max_tries=None,
replicaset=None):
def __init__(self, replicaset=None, **kwargs):
"""Create a new Connection instance.
Args:
host (str, optional): the host to connect to.
port (int, optional): the port to connect to.
dbname (str, optional): the database to use.
connection_timeout (int, optional): the milliseconds to wait
until timing out the database connection attempt.
max_tries (int, optional): how many tries before giving up,
if 0 then try forever.
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
self.host = host or bigchaindb.config['database']['host']
self.port = port or bigchaindb.config['database']['port']
super().__init__(**kwargs)
self.replicaset = replicaset or bigchaindb.config['database']['replicaset']
self.dbname = dbname or bigchaindb.config['database']['name']
self.connection_timeout = connection_timeout if connection_timeout is not None else CONNECTION_TIMEOUT
self.max_tries = max_tries if max_tries is not None else MAX_RETRIES
self.max_tries_counter = range(self.max_tries) if self.max_tries != 0 else repeat(0)
self.connection = None
@property
def conn(self):
if self.connection is None:
self._connect()
return self.connection
@property
def db(self):
@ -94,34 +67,23 @@ class MongoDBConnection(Connection):
fails.
"""
attempt = 0
for i in self.max_tries_counter:
attempt += 1
try:
# we should only return a connection if the replica set is
# initialized. initialize_replica_set will check if the
# replica set is initialized else it will initialize it.
initialize_replica_set(self.host, self.port, self.connection_timeout)
try:
# we should only return a connection if the replica set is
# initialized. initialize_replica_set will check if the
# replica set is initialized else it will initialize it.
initialize_replica_set(self.host, self.port, self.connection_timeout)
# FYI: this might raise a `ServerSelectionTimeoutError`,
# that is a subclass of `ConnectionFailure`.
return pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout)
# FYI: this might raise a `ServerSelectionTimeoutError`,
# that is a subclass of `ConnectionFailure`.
self.connection = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout)
# `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.warning('Attempt %s/%s. Connection to %s:%s failed after %sms.',
attempt, self.max_tries if self.max_tries != 0 else '',
self.host, self.port, self.connection_timeout)
if attempt == self.max_tries:
logger.critical('Cannot connect to the Database. Giving up.')
raise ConnectionError() from exc
else:
break
# `initialize_replica_set` might raise `ConnectionFailure` or `OperationFailure`.
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
raise ConnectionError() from exc
def initialize_replica_set(host, port, connection_timeout):
@ -168,7 +130,7 @@ def _check_replica_set(conn):
options = conn.admin.command('getCmdLineOpts')
try:
repl_opts = options['parsed']['replication']
repl_set_name = repl_opts.get('replSetName', None) or repl_opts['replSet']
repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
except KeyError:
raise ConfigurationError('mongod was not started with'
' the replSet option.')
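With the retry, lazy-connect and config-fallback logic moved into the base class, a backend-specific connection like the refactored MongoDBConnection above only has to build the driver connection and translate driver errors into the backend-agnostic `ConnectionError`. A hedged sketch of that contract for a hypothetical backend (`DummyConnection` and its helper are illustrative, not part of BigchainDB):

```python
# Hypothetical backend sketch following the same pattern as the refactored
# MongoDBConnection above: implement _connect() and run(), raise
# ConnectionError on failure, and let the base class handle retries.
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.exceptions import ConnectionError


class DummyConnection(Connection):

    def _connect(self):
        """Return a driver-level connection or raise ConnectionError."""
        try:
            return self._open_driver_connection()  # hypothetical helper
        except OSError as exc:
            raise ConnectionError() from exc

    def _open_driver_connection(self):
        # Stand-in for e.g. pymongo.MongoClient(...) or r.connect(...).
        return object()

    def run(self, query):
        # self.conn lazily calls connect(), which retries up to max_tries.
        return query(self.conn)  # hypothetical query protocol
```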


@ -212,13 +212,6 @@ def get_block(conn, block_id):
projection={'_id': False}))
@register_query(MongoDBConnection)
def has_transaction(conn, transaction_id):
return bool(conn.run(
conn.collection('bigchain')
.find_one({'block.transactions.id': transaction_id})))
@register_query(MongoDBConnection)
def count_blocks(conn):
return conn.run(


@ -211,20 +211,6 @@ def get_block(connection, block_id):
raise NotImplementedError
@singledispatch
def has_transaction(connection, transaction_id):
"""Check if a transaction exists in the bigchain table.
Args:
transaction_id (str): the id of the transaction to check.
Returns:
``True`` if the transaction exists, ``False`` otherwise.
"""
raise NotImplementedError
@singledispatch
def count_blocks(connection):
"""Count the number of blocks in the bigchain table.


@ -96,7 +96,7 @@ def reconfigure(connection, *, table, shards, replicas,
try:
return connection.run(r.table(table).reconfigure(**params))
except (r.ReqlOpFailedError, r.ReqlQueryLogicError) as e:
raise OperationError from e
raise OperationError('Failed to reconfigure tables.') from e
@register_admin(RethinkDBConnection)


@ -3,6 +3,7 @@ import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.backend.exceptions import BackendError
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
@ -23,8 +24,8 @@ class RethinkDBChangeFeed(ChangeFeed):
try:
self.run_changefeed()
break
except (r.ReqlDriverError, r.ReqlOpFailedError) as exc:
logger.exception(exc)
except (BackendError, r.ReqlDriverError) as exc:
logger.exception('Error connecting to the database, retrying')
time.sleep(1)
def run_changefeed(self):


@ -1,11 +1,7 @@
import time
import logging
import rethinkdb as r
from bigchaindb.backend.connection import Connection
logger = logging.getLogger(__name__)
from bigchaindb.backend.exceptions import ConnectionError, OperationError
class RethinkDBConnection(Connection):
@ -17,23 +13,6 @@ class RethinkDBConnection(Connection):
more times to run the query or open a connection.
"""
def __init__(self, host, port, dbname, max_tries=3, **kwargs):
"""Create a new :class:`~.RethinkDBConnection` instance.
See :meth:`.Connection.__init__` for
:attr:`host`, :attr:`port`, and :attr:`dbname`.
Args:
max_tries (int, optional): how many tries before giving up.
Defaults to 3.
"""
self.host = host
self.port = port
self.dbname = dbname
self.max_tries = max_tries
self.conn = None
def run(self, query):
"""Run a RethinkDB query.
@ -45,16 +24,10 @@ class RethinkDBConnection(Connection):
:attr:`~.RethinkDBConnection.max_tries`.
"""
if self.conn is None:
self._connect()
for i in range(self.max_tries):
try:
return query.run(self.conn)
except r.ReqlDriverError:
if i + 1 == self.max_tries:
raise
self._connect()
try:
return query.run(self.conn)
except r.ReqlDriverError as exc:
raise OperationError from exc
def _connect(self):
"""Set a connection to RethinkDB.
@ -66,16 +39,7 @@ class RethinkDBConnection(Connection):
:attr:`~.RethinkDBConnection.max_tries`.
"""
for i in range(1, self.max_tries + 1):
logging.debug('Connecting to database %s:%s/%s. (Attempt %s/%s)',
self.host, self.port, self.dbname, i, self.max_tries)
try:
self.conn = r.connect(host=self.host, port=self.port, db=self.dbname)
except r.ReqlDriverError:
if i == self.max_tries:
raise
wait_time = 2**i
logging.debug('Error connecting to database, waiting %ss', wait_time)
time.sleep(wait_time)
else:
break
try:
return r.connect(host=self.host, port=self.port, db=self.dbname)
except r.ReqlDriverError as exc:
raise ConnectionError from exc
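The slimmed-down RethinkDB connection no longer retries on its own; connection failures surface as `ConnectionError` (raised by the base class after `max_tries` attempts) and query failures as `OperationError`. A usage sketch mirroring the updated tests further down, assuming a RethinkDB instance reachable at the configured host and port:

```python
# Sketch: run a trivial ReQL query through the refactored connection.
import rethinkdb as r

from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError, OperationError

conn = connect(backend='rethinkdb')

try:
    assert conn.run(r.expr('1')) == '1'
except ConnectionError:
    print('Could not reach RethinkDB after max_tries attempts.')
except OperationError:
    print('Connected, but the query itself failed.')
```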


@ -158,13 +158,6 @@ def get_block(connection, block_id):
return connection.run(r.table('bigchain').get(block_id))
@register_query(RethinkDBConnection)
def has_transaction(connection, transaction_id):
return bool(connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get_all(transaction_id, index='transaction_id').count()))
@register_query(RethinkDBConnection)
def count_blocks(connection):
return connection.run(


@ -3,12 +3,11 @@ the command-line interface (CLI) for BigchainDB Server.
"""
import os
import sys
import logging
import argparse
import copy
import json
import builtins
import sys
import logstats
@ -17,36 +16,37 @@ from bigchaindb.common.exceptions import (StartupError,
DatabaseAlreadyExists,
KeypairNotFoundException)
import bigchaindb
import bigchaindb.config_utils
from bigchaindb.models import Transaction
from bigchaindb.utils import ProcessGroup
from bigchaindb import backend
from bigchaindb import backend, processes
from bigchaindb.backend import schema
from bigchaindb.backend.admin import (set_replicas, set_shards, add_replicas,
remove_replicas)
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.commands import utils
from bigchaindb import processes
from bigchaindb.commands.messages import (
CANNOT_START_KEYPAIR_NOT_FOUND,
RETHINKDB_STARTUP_ERROR,
)
from bigchaindb.commands.utils import configure_bigchaindb, input_on_stderr
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# We need this because `input` always prints on stdout, while it should print
# to stderr. It's a very old bug, check it out here:
# - https://bugs.python.org/issue1927
def input_on_stderr(prompt=''):
print(prompt, end='', file=sys.stderr)
return builtins.input()
# Note about printing:
# We try to print to stdout for results of a command that may be useful to
# someone (or another program). Strictly informational text, or errors,
# should be printed to stderr.
@configure_bigchaindb
def run_show_config(args):
"""Show the current configuration"""
# TODO Proposal: remove the "hidden" configuration. Only show config. If
# the system needs to be configured, then display information on how to
# configure the system.
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
config = copy.deepcopy(bigchaindb.config)
del config['CONFIGURED']
private_key = config['keypair']['private']
@ -89,7 +89,7 @@ def run_configure(args, skip_if_exists=False):
# select the correct config defaults based on the backend
print('Generating default configuration for backend {}'
.format(args.backend))
.format(args.backend), file=sys.stderr)
conf['database'] = bigchaindb._database_map[args.backend]
if not args.yes:
@ -119,11 +119,10 @@ def run_configure(args, skip_if_exists=False):
print('Ready to go!', file=sys.stderr)
@configure_bigchaindb
def run_export_my_pubkey(args):
"""Export this node's public key to standard output
"""
logger.debug('bigchaindb args = {}'.format(args))
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
pubkey = bigchaindb.config['keypair']['public']
if pubkey is not None:
print(pubkey)
@ -141,14 +140,13 @@ def _run_init():
schema.init_database(connection=b.connection)
logger.info('Create genesis block.')
b.create_genesis_block()
logger.info('Done, have fun!')
logger.info('Genesis block created.')
@configure_bigchaindb
def run_init(args):
"""Initialize the database"""
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
# TODO Provide mechanism to:
# 1. prompt the user to inquire whether they wish to drop the db
# 2. force the init, (e.g., via -f flag)
@ -159,9 +157,9 @@ def run_init(args):
print('If you wish to re-initialize it, first drop it.', file=sys.stderr)
@configure_bigchaindb
def run_drop(args):
"""Drop the database"""
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
dbname = bigchaindb.config['database']['name']
if not args.yes:
@ -174,11 +172,10 @@ def run_drop(args):
schema.drop_database(conn, dbname)
@configure_bigchaindb
def run_start(args):
"""Start the processes to run the node"""
logger.info('BigchainDB Version {}'.format(bigchaindb.__version__))
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
logger.info('BigchainDB Version %s', bigchaindb.__version__)
if args.allow_temp_keypair:
if not (bigchaindb.config['keypair']['private'] or
@ -194,7 +191,7 @@ def run_start(args):
try:
proc = utils.start_rethinkdb()
except StartupError as e:
sys.exit('Error starting RethinkDB, reason is: {}'.format(e))
sys.exit(RETHINKDB_STARTUP_ERROR.format(e))
logger.info('RethinkDB started with PID %s' % proc.pid)
try:
@ -202,8 +199,7 @@ def run_start(args):
except DatabaseAlreadyExists:
pass
except KeypairNotFoundException:
sys.exit("Can't start BigchainDB, no keypair found. "
'Did you run `bigchaindb configure`?')
sys.exit(CANNOT_START_KEYPAIR_NOT_FOUND)
logger.info('Starting BigchainDB main process with public key %s',
bigchaindb.config['keypair']['public'])
@ -227,8 +223,8 @@ def _run_load(tx_left, stats):
break
@configure_bigchaindb
def run_load(args):
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
logger.info('Starting %s processes', args.multiprocess)
stats = logstats.Logstats()
logstats.thread.start(stats)
@ -243,46 +239,48 @@ def run_load(args):
workers.start()
@configure_bigchaindb
def run_set_shards(args):
conn = backend.connect()
try:
set_shards(conn, shards=args.num_shards)
except OperationError as e:
logger.warn(e)
sys.exit(str(e))
@configure_bigchaindb
def run_set_replicas(args):
conn = backend.connect()
try:
set_replicas(conn, replicas=args.num_replicas)
except OperationError as e:
logger.warn(e)
sys.exit(str(e))
@configure_bigchaindb
def run_add_replicas(args):
# Note: This command is specific to MongoDB
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
conn = backend.connect()
try:
add_replicas(conn, args.replicas)
except (OperationError, NotImplementedError) as e:
logger.warn(e)
sys.exit(str(e))
else:
logger.info('Added {} to the replicaset.'.format(args.replicas))
print('Added {} to the replicaset.'.format(args.replicas))
@configure_bigchaindb
def run_remove_replicas(args):
# Note: This command is specific to MongoDB
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
conn = backend.connect()
try:
remove_replicas(conn, args.replicas)
except (OperationError, NotImplementedError) as e:
logger.warn(e)
sys.exit(str(e))
else:
logger.info('Removed {} from the replicaset.'.format(args.replicas))
print('Removed {} from the replicaset.'.format(args.replicas))
def create_parser():
@ -290,16 +288,6 @@ def create_parser():
description='Control your BigchainDB node.',
parents=[utils.base_parser])
parser.add_argument('--dev-start-rethinkdb',
dest='start_rethinkdb',
action='store_true',
help='Run RethinkDB on start')
parser.add_argument('--dev-allow-temp-keypair',
dest='allow_temp_keypair',
action='store_true',
help='Generate a random keypair on start')
# all the commands are contained in the subparsers object,
# the command selected by the user will be stored in `args.command`
# that is used by the `main` function to select which other
@ -331,8 +319,18 @@ def create_parser():
help='Drop the database')
# parser for starting BigchainDB
subparsers.add_parser('start',
help='Start BigchainDB')
start_parser = subparsers.add_parser('start',
help='Start BigchainDB')
start_parser.add_argument('--dev-allow-temp-keypair',
dest='allow_temp_keypair',
action='store_true',
help='Generate a random keypair on start')
start_parser.add_argument('--dev-start-rethinkdb',
dest='start_rethinkdb',
action='store_true',
help='Run RethinkDB on start')
# parser for configuring the number of shards
sharding_parser = subparsers.add_parser('set-shards',


@ -0,0 +1,10 @@
"""Module to store messages used in commands, such as error messages,
warnings, prompts, etc.
"""
CANNOT_START_KEYPAIR_NOT_FOUND = (
"Can't start BigchainDB, no keypair found. "
'Did you run `bigchaindb configure`?'
)
RETHINKDB_STARTUP_ERROR = 'Error starting RethinkDB, reason is: {}'


@ -3,18 +3,39 @@ for ``argparse.ArgumentParser``.
"""
import argparse
import builtins
import functools
import multiprocessing as mp
import subprocess
import sys
import rethinkdb as r
from pymongo import uri_parser
import bigchaindb
import bigchaindb.config_utils
from bigchaindb import backend
from bigchaindb.common.exceptions import StartupError
from bigchaindb.version import __version__
def configure_bigchaindb(command):
@functools.wraps(command)
def configure(args):
bigchaindb.config_utils.autoconfigure(filename=args.config, force=True)
command(args)
return configure
# We need this because `input` always prints on stdout, while it should print
# to stderr. It's a very old bug, check it out here:
# - https://bugs.python.org/issue1927
def input_on_stderr(prompt=''):
print(prompt, end='', file=sys.stderr)
return builtins.input()
def start_rethinkdb():
"""Start RethinkDB as a child process and wait for it to be
available.

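The new `configure_bigchaindb` decorator replaces the per-command `autoconfigure(...)` calls removed from the commands module above. A hedged sketch of how a subcommand opts in (`run_example` is illustrative and not part of this diff):

```python
# Sketch: a hypothetical CLI subcommand using the new decorator. By the time
# the body runs, bigchaindb.config has been autoconfigured from args.config,
# exactly as run_init, run_start and friends rely on above.
import bigchaindb
from bigchaindb.commands.utils import configure_bigchaindb


@configure_bigchaindb
def run_example(args):
    """Print the configured database name (illustrative only)."""
    print(bigchaindb.config['database']['name'])
```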

@ -176,6 +176,22 @@ class Bigchain(object):
exceptions.TransactionNotInValidBlock, exceptions.AmountError):
return False
def is_new_transaction(self, txid, exclude_block_id=None):
"""
Return True if the transaction does not exist in any
VALID or UNDECIDED block. Return False otherwise.
Args:
txid (str): Transaction ID
exclude_block_id (str): Exclude block from search
"""
block_statuses = self.get_blocks_status_containing_tx(txid)
block_statuses.pop(exclude_block_id, None)
for status in block_statuses.values():
if status != self.BLOCK_INVALID:
return False
return True
def get_block(self, block_id, include_status=False):
"""Get the block with the specified `block_id` (and optionally its status)
@ -398,14 +414,13 @@ class Bigchain(object):
# check if the owner is in the condition `owners_after`
if len(output['public_keys']) == 1:
if output['condition']['details']['public_key'] == owner:
tx_link = TransactionLink(tx['id'], index)
links.append(TransactionLink(tx['id'], index))
else:
# for transactions with multiple `public_keys` there will be several subfulfillments nested
# in the condition. We need to iterate the subfulfillments to make sure there is a
# subfulfillment for `owner`
if utils.condition_details_has_owner(output['condition']['details'], owner):
tx_link = TransactionLink(tx['id'], index)
links.append(tx_link)
links.append(TransactionLink(tx['id'], index))
return links
def get_owned_ids(self, owner):
@ -502,9 +517,6 @@ class Bigchain(object):
return backend.query.write_block(self.connection, block)
def transaction_exists(self, transaction_id):
return backend.query.has_transaction(self.connection, transaction_id)
def prepare_genesis_block(self):
"""Prepare a genesis block."""

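The new `is_new_transaction` helper replaces the removed `transaction_exists` plus the status juggling that used to live in the block pipeline. A usage sketch, in the spirit of the new test at the end of this diff and assuming a configured node with a keypair and a reachable database (`'some-block-id'` is a placeholder):

```python
# Sketch: "new" means the transaction appears in no VALID or UNDECIDED block.
from bigchaindb import Bigchain
from bigchaindb.models import Transaction

b = Bigchain()
tx = Transaction.create([b.me], [([b.me], 1)]).sign([b.me_private])

# Not in any block yet, so it is new and safe to put into a block.
assert b.is_new_transaction(tx.id)

# The voting pipeline passes exclude_block_id so the block currently being
# voted on does not count against its own transactions.
assert b.is_new_transaction(tx.id, exclude_block_id='some-block-id')
```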

@ -67,28 +67,19 @@ class BlockPipeline:
AmountError):
return None
if self.bigchain.transaction_exists(tx.id):
# if the transaction already exists, we must check whether
# it's in a valid or undecided block
tx, status = self.bigchain.get_transaction(tx.id,
include_status=True)
if status == self.bigchain.TX_VALID \
or status == self.bigchain.TX_UNDECIDED:
# if the tx is already in a valid or undecided block,
# then it no longer should be in the backlog, or added
# to a new block. We can delete and drop it.
self.bigchain.delete_transaction(tx.id)
return None
tx_validated = self.bigchain.is_valid_transaction(tx)
if tx_validated:
return tx
else:
# if the transaction is not valid, remove it from the
# backlog
# If transaction is in any VALID or UNDECIDED block we
# should not include it again
if not self.bigchain.is_new_transaction(tx.id):
self.bigchain.delete_transaction(tx.id)
return None
# If transaction is not valid it should not be included
if not self.bigchain.is_valid_transaction(tx):
self.bigchain.delete_transaction(tx.id)
return None
return tx
def create(self, tx, timeout=False):
"""Create a block.


@ -13,9 +13,7 @@ from multipipes import Pipeline, Node
import bigchaindb
from bigchaindb import Bigchain
from bigchaindb import backend
from bigchaindb import config_utils
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.consensus import BaseConsensusRules
from bigchaindb.models import Transaction, Block
from bigchaindb.common import exceptions
@ -37,13 +35,6 @@ class Vote:
# we need to create a temporary instance of BigchainDB that we use
# only to query RethinkDB
consensusPlugin = bigchaindb.config.get('consensus_plugin')
if consensusPlugin:
self.consensus = config_utils.load_consensus_plugin(consensusPlugin)
else:
self.consensus = BaseConsensusRules
# This is the Bigchain instance that will be "shared" (aka: copied)
# by all the subprocesses
@ -97,7 +88,8 @@ class Vote:
yield tx, block_id, num_tx
def validate_tx(self, tx, block_id, num_tx):
"""Validate a transaction.
"""Validate a transaction. Transaction must also not be in any VALID
block.
Args:
tx (dict): the transaction to validate
@ -108,7 +100,12 @@ class Vote:
Three values are returned, the validity of the transaction,
``block_id``, ``num_tx``.
"""
return bool(self.bigchain.is_valid_transaction(tx)), block_id, num_tx
new = self.bigchain.is_new_transaction(tx.id, exclude_block_id=block_id)
if not new:
return False, block_id, num_tx
valid = bool(self.bigchain.is_valid_transaction(tx))
return valid, block_id, num_tx
def vote(self, tx_validity, block_id, num_tx):
"""Collect the validity of transactions and cast a vote when ready.


@ -69,6 +69,12 @@ class Voting:
* Detect if there are multiple votes from a single node and return them
in a separate "cheat" dictionary.
* Votes must agree on previous block, otherwise they become invalid.
note:
The sum of votes returned by this function does not necessarily
equal the length of the list of votes fed in; for example, it may
differ when a single voter submitted multiple votes.
"""
prev_blocks = collections.Counter()
cheat = []


@ -42,23 +42,10 @@ This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and p
Eventually, you'll have one or more instances (virtual machines) running on AWS and you'll want to SSH to them. To do that, you need a public/private key pair. The public key will be sent to AWS, and you can tell AWS to put it in any instances you provision there. You'll keep the private key on your local workstation.
First you need to make up a key name. Some ideas:
See the [page about how to generate a key pair for SSH](generate-key-pair-for-ssh.html).
* `bcdb-troy-1`
* `bigchaindb-7`
* `bcdb-jupiter`
If you already have key pairs on AWS (Amazon EC2), you have to pick a name that's not already being used.
Below, replace every instance of `<key-name>` with your actual key name.
To generate a public/private RSA key pair with that name:
```text
ssh-keygen -t rsa -C "<key-name>" -f ~/.ssh/<key-name>
```
It will ask you for a passphrase. You can use whatever passphrase you like, but don't lose it. Two keys (files) will be created in `~/.ssh/`:
1. `~/.ssh/<key-name>.pub` is the public key
2. `~/.ssh/<key-name>` is the private key
## Send the Public Key to AWS
To send the public key to AWS, use the AWS Command-Line Interface:
```text


@ -0,0 +1,34 @@
# Generate a Key Pair for SSH
This page describes how to use `ssh-keygen`
to generate a public/private RSA key pair
that can be used with SSH.
(Note: `ssh-keygen` is found on most Linux and Unix-like
operating systems; if you're using Windows,
then you'll have to use another tool,
such as PuTTYgen.)
By convention, SSH key pairs get stored in the `~/.ssh/` directory.
Check what keys you already have there:
```text
ls -1 ~/.ssh/
```
Next, make up a new key pair name (called `<name>` below).
Here are some ideas:
* `aws-bdb-2`
* `tim-bdb-azure`
* `chris-bcdb-key`
Next, generate a public/private RSA key pair with that name:
```text
ssh-keygen -t rsa -C "<name>" -f ~/.ssh/<name>
```
It will ask you for a passphrase.
You can use whatever passphrase you like, but don't lose it.
Two keys (files) will be created in `~/.ssh/`:
1. `~/.ssh/<name>.pub` is the public key
2. `~/.ssh/<name>` is the private key


@ -17,6 +17,7 @@ Appendices
pipelines
backend
aws-setup
generate-key-pair-for-ssh
firewall-notes
ntp-notes
example-rethinkdb-storage-setups


@ -75,6 +75,8 @@ docker run \
--name=rethinkdb \
--publish=172.17.0.1:28015:28015 \
--publish=172.17.0.1:58080:8080 \
--restart=always \
--volume "$HOME/bigchaindb_docker:/data" \
rethinkdb:2.3
```
@ -85,11 +87,25 @@ You can also access the RethinkDB dashboard at
#### For MongoDB
Note: MongoDB runs as user `mongodb`, which has UID `999` and GID `999`
inside the container. For the volume to be mounted properly, your host
should have a `mongodb` user with UID and GID `999`.
If another user on the host already has UID `999`, the mapped files will
be owned by that user on the host.
If no user with UID `999` exists, you can create the corresponding user and
group:
`groupadd -r --gid 999 mongodb && useradd -r --uid 999 -g mongodb mongodb`
```text
docker run \
--detach \
--name=mongodb \
--publish=172.17.0.1:27017:27017 \
--restart=always \
--volume=/tmp/mongodb_docker/db:/data/db \
--volume=/tmp/mongodb_docker/configdb:/data/configdb \
mongo:3.4.1 --replSet=bigchain-rs
```
@ -100,6 +116,7 @@ docker run \
--detach \
--name=bigchaindb \
--publish=59984:9984 \
--restart=always \
--volume=$HOME/bigchaindb_docker:/data \
bigchaindb/bigchaindb \
start


@ -21,7 +21,7 @@ Step 2: Configure kubectl
The default location of the kubectl configuration file is ``~/.kube/config``.
If you don't have that file, then you need to get it.
If you deployed your Kubernetes cluster on Azure
**Azure.** If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0 (as per :doc:`our template <template-kubernetes-azure>`),
then you can get the ``~/.kube/config`` file using:
@ -32,15 +32,163 @@ then you can get the ``~/.kube/config`` file using:
--name <ACS cluster name>
Step 3: Run a MongoDB Container
-------------------------------
Step 3: Create a StorageClass
-----------------------------
To start a MongoDB Docker container in a pod on one of the cluster nodes:
MongoDB needs somewhere to store its data persistently,
outside the container where MongoDB is running.
Explaining how Kubernetes handles persistent volumes,
and the associated terminology,
is beyond the scope of this documentation;
see `the Kubernetes docs about persistent volumes
<https://kubernetes.io/docs/user-guide/persistent-volumes>`_.
The first thing to do is create a Kubernetes StorageClass.
**Azure.** First, you need an Azure storage account.
If you deployed your Kubernetes cluster on Azure
using the Azure CLI 2.0
(as per :doc:`our template <template-kubernetes-azure>`),
then the `az acs create` command already created two
storage accounts in the same location and resource group
as your Kubernetes cluster.
Both should have the same "storage account SKU": ``Standard_LRS``.
Standard storage is lower-cost and lower-performance.
It uses hard disk drives (HDD).
LRS means locally-redundant storage: three replicas
in the same data center.
Premium storage is higher-cost and higher-performance.
It uses solid state drives (SSD).
At the time of writing,
when we created a storage account with SKU ``Premium_LRS``
and tried to use that,
the PersistentVolumeClaim would get stuck in a "Pending" state.
For future reference, the command to create a storage account is
`az storage account create <https://docs.microsoft.com/en-us/cli/azure/storage/account#create>`_.
Create a Kubernetes Storage Class named ``slow``
by writing a file named ``azureStorageClass.yml`` containing:
.. code:: yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: slow
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Standard_LRS
location: <region where your cluster is located>
and then:
.. code:: bash
$ kubectl ?????
$ kubectl apply -f azureStorageClass.yml
You can check if it worked using ``kubectl get storageclasses``.
Note that there is no line of the form
``storageAccount: <azure storage account name>``
under ``parameters:``. When we included one
and then created a PersistentVolumeClaim based on it,
the PersistentVolumeClaim would get stuck
in a "Pending" state.
Kubernetes just looks for a storageAccount
with the specified skuName and location.
Note: The BigchainDB Dashboard can be deployed
as a Docker container, like everything else.
Step 4: Create a PersistentVolumeClaim
--------------------------------------
Next, you'll create a PersistentVolumeClaim named ``mongoclaim``.
Create a file named ``mongoclaim.yml``
with the following contents:
.. code:: yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mongoclaim
annotations:
volume.beta.kubernetes.io/storage-class: slow
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
Note how there's no explicit mention of Azure, AWS or whatever.
``ReadWriteOnce`` (RWO) means the volume can be mounted as
read-write by a single Kubernetes node.
(``ReadWriteOnce`` is the *only* access mode supported
by AzureDisk.)
``storage: 20Gi`` means the volume has a size of 20
`gibibytes <https://en.wikipedia.org/wiki/Gibibyte>`_.
(You can change that if you like.)
Create ``mongoclaim`` in your Kubernetes cluster:
.. code:: bash
$ kubectl apply -f mongoclaim.yml
You can check its status using:
.. code:: bash
$ kubectl get pvc
Initially, the status of ``mongoclaim`` might be "Pending"
but it should become "Bound" fairly quickly.
.. code:: bash
$ kubectl describe pvc
Name: mongoclaim
Namespace: default
StorageClass: slow
Status: Bound
Volume: pvc-ebed81f1-fdca-11e6-abf0-000d3a27ab21
Labels: <none>
Capacity: 20Gi
Access Modes: RWO
No events.
Step 5: Deploy MongoDB & BigchainDB
-----------------------------------
Now you can deploy MongoDB and BigchainDB to your Kubernetes cluster.
Currently, the way we do that is we create a StatefulSet with two
containers: BigchainDB and MongoDB. (In the future, we'll put them
in separate pods, and we'll ensure those pods are in different nodes.)
We expose BigchainDB's port 9984 (the HTTP API port)
and MongoDB's port 27017 using a Kubernetes Service.
Get the file ``node-mdb-ss.yaml`` from GitHub using:
.. code:: bash
$ wget https://raw.githubusercontent.com/bigchaindb/bigchaindb/master/k8s/node-mdb-ss.yaml
Take a look inside that file to see how it defines the Service
and the StatefulSet.
Note how the MongoDB container uses the ``mongoclaim`` PersistentVolumeClaim
for its ``/data`` directory (mount path).
Create the StatefulSet and Service in your cluster using:
.. code:: bash
$ kubectl apply -f node-mdb-ss.yaml
You can check that they're working using:
.. code:: bash
$ kubectl get services
$ kubectl get statefulsets


@ -18,7 +18,20 @@ You may find that you have to sign up for a Free Trial subscription first.
That's okay: you can have many subscriptions.
Step 2: Deploy an Azure Container Service (ACS)
Step 2: Create an SSH Key Pair
------------------------------
You'll want an SSH key pair so you'll be able to SSH
to the virtual machines that you'll deploy in the next step.
(If you already have an SSH key pair, you *could* reuse it,
but it's probably a good idea to make a new SSH key pair
for your Kubernetes VMs and nothing else.)
See the
:ref:`page about how to generate a key pair for SSH <Generate a Key Pair for SSH>`.
Step 3: Deploy an Azure Container Service (ACS)
-----------------------------------------------
It's *possible* to deploy an Azure Container Service (ACS)
@ -26,16 +39,18 @@ from the `Azure Portal <https://portal.azure.com>`_
(i.e. online in your web browser)
but it's actually easier to do it using the Azure
Command-Line Interface (CLI).
(The Azure Portal will ask you for a public SSH key
and a "service principal," and you'll have to create those
first if they don't exist. The CLI will create them
for you if necessary.)
Microsoft has `instructions to install the Azure CLI 2.0
on most common operating systems
<https://docs.microsoft.com/en-us/cli/azure/install-az-cli2>`_.
Do that.
First, update the Azure CLI to the latest version:
.. code:: bash
$ az component update
Next, login to your account using:
.. code:: bash
@ -82,8 +97,7 @@ Finally, you can deploy an ACS using something like:
--agent-count 3 \
--agent-vm-size Standard_D2_v2 \
--dns-prefix <make up a name> \
--generate-ssh-keys \
--location <same location as the resource group> \
--ssh-key-value ~/.ssh/<name>.pub \
--orchestrator-type kubernetes
There are more options. For help understanding all the options, use the built-in help:
@ -100,4 +114,32 @@ and click on the one you created
to see all the resources in it.
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.
Optional: SSH to Your New Kubernetes Cluster Nodes
--------------------------------------------------
You can SSH to one of the just-deployed Kubernetes "master" nodes
(virtual machines) using:
.. code:: bash
$ ssh -i ~/.ssh/<name>.pub azureuser@<master-ip-address-or-hostname>
where you can get the IP address or hostname
of a master node from the Azure Portal.
Note how the default username is ``azureuser``.
The "agent" nodes don't get public IP addresses or hostnames,
so you can't SSH to them *directly*,
but you can first SSH to the master
and then SSH to an agent from there
(using the *private* IP address or hostname of the agent node).
To do that, you either need to copy your SSH key pair to
the master (a bad idea),
or use something like
`SSH agent forwarding <https://yakking.branchable.com/posts/ssh-A/>`_ (better).
Next, you can :doc:`run a BigchainDB node on your new
Kubernetes cluster <node-on-kubernetes>`.

k8s/node-mdb-ss.yaml (new file, 114 lines)

@ -0,0 +1,114 @@
#################################################################
# This YAML file describes a StatefulSet with two containers: #
# bigchaindb/bigchaindb:latest and mongo:3.4.1 #
# It also describes a Service to expose BigchainDB and MongoDB. #
#################################################################
apiVersion: v1
kind: Service
metadata:
name: bdb-service
namespace: default
labels:
name: bdb-service
spec:
selector:
app: bdb
ports:
- port: 9984
targetPort: 9984
name: bdb-http-api
- port: 27017
targetPort: 27017
name: mongodb-port
type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: bdb
namespace: default
spec:
serviceName: bdb
replicas: 1
template:
metadata:
name: bdb
labels:
app: bdb
#annotations:
#pod.beta.kubernetes.io/init-containers: '[
# TODO mongodb user and group; id = 999
spec:
terminationGracePeriodSeconds: 10
containers:
- name: bdb-server
image: bigchaindb/bigchaindb:latest
args:
- start
env:
- name: BIGCHAINDB_KEYPAIR_PRIVATE
value: 3CjmRhu718gT1Wkba3LfdqX5pfYuBdaMPLd7ENUga5dm
- name: BIGCHAINDB_KEYPAIR_PUBLIC
value: EEWUAhsk94ZUHhVw7qx9oZiXYDAWc9cRz93eMrsTG4kZ
- name: BIGCHAINDB_KEYRING
value: ""
- name: BIGCHAINDB_DATABASE_BACKEND
value: mongodb
- name: BIGCHAINDB_DATABASE_HOST
value: localhost
- name: BIGCHAINDB_DATABASE_PORT
value: "27017"
- name: BIGCHAINDB_SERVER_BIND
value: "0.0.0.0:9984"
- name: BIGCHAINDB_DATABASE_REPLICASET
value: bigchain-rs
- name: BIGCHAINDB_DATABASE_NAME
value: bigchain
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
value: "120"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9984
hostPort: 9984
name: bdb-port
protocol: TCP
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: bdb-port
initialDelaySeconds: 15
timeoutSeconds: 10
- name: mongodb
image: mongo:3.4.1
args:
- --replSet=bigchain-rs
imagePullPolicy: IfNotPresent
ports:
- containerPort: 27017
hostPort: 27017
name: mdb-port
protocol: TCP
volumeMounts:
- name: mdb-data
mountPath: /data
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
tcpSocket:
port: mdb-port
successThreshold: 1
failureThreshold: 3
periodSeconds: 15
timeoutSeconds: 1
restartPolicy: Always
volumes:
- name: mdb-data
persistentVolumeClaim:
claimName: mongoclaim

k8s/node-rdb-ss.yaml (new file, 131 lines)

@ -0,0 +1,131 @@
##############################################################
# This YAML file describes a StatefulSet with two containers: #
# bigchaindb/bigchaindb:latest and rethinkdb:2.3 #
# It also describes a Service to expose BigchainDB, #
# the RethinkDB intracluster communications port, and #
# the RethinkDB web interface port. #
##############################################################
apiVersion: v1
kind: Service
metadata:
name: bdb-service
namespace: default
labels:
name: bdb-service
spec:
selector:
app: bdb
ports:
- port: 9984
targetPort: 9984
name: bdb-http-api
- port: 29015
targetPort: 29015
name: rdb-intracluster-comm-port
- port: 8080
targetPort: 8080
name: rdb-web-interface-port
type: LoadBalancer
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: bdb
namespace: default
spec:
serviceName: bdb
replicas: 1
template:
metadata:
name: bdb
labels:
app: bdb
spec:
terminationGracePeriodSeconds: 10
containers:
- name: bdb-server
image: bigchaindb/bigchaindb:latest
args:
- start
env:
- name: BIGCHAINDB_KEYPAIR_PRIVATE
value: 56mEvwwVxcYsFQ3Y8UTFB8DVBv38yoUhxzDW3DAdLVd2
- name: BIGCHAINDB_KEYPAIR_PUBLIC
value: 9DsHwiEtvk51UHmNM2eV66czFha69j3CdtNrCj1RcZWR
- name: BIGCHAINDB_KEYRING
value: ""
- name: BIGCHAINDB_DATABASE_BACKEND
value: rethinkdb
- name: BIGCHAINDB_DATABASE_HOST
value: localhost
- name: BIGCHAINDB_DATABASE_PORT
value: "28015"
- name: BIGCHAINDB_SERVER_BIND
value: "0.0.0.0:9984"
- name: BIGCHAINDB_DATABASE_NAME
value: bigchain
- name: BIGCHAINDB_BACKLOG_REASSIGN_DELAY
value: "120"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9984
hostPort: 9984
name: bdb-port
protocol: TCP
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: 9984
initialDelaySeconds: 15
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: 9984
initialDelaySeconds: 15
timeoutSeconds: 10
- name: rethinkdb
image: rethinkdb:2.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
hostPort: 8080
name: rdb-web-interface-port
protocol: TCP
- containerPort: 29015
hostPort: 29015
name: rdb-intra-port
protocol: TCP
- containerPort: 28015
hostPort: 28015
name: rdb-client-port
protocol: TCP
volumeMounts:
- name: rdb-data
mountPath: /data
resources:
limits:
cpu: 200m
memory: 768Mi
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 15
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 15
timeoutSeconds: 10
restartPolicy: Always
volumes:
- name: rdb-data
persistentVolumeClaim:
claimName: mongoclaim


@ -50,6 +50,7 @@ tests_require = [
'pytest>=3.0.0',
'pytest-catchlog>=1.2.2',
'pytest-cov>=2.2.1',
'pytest-mock',
'pytest-xdist',
'pytest-flask',
'tox',
@ -65,7 +66,7 @@ install_requires = [
'pymongo~=3.4',
'pysha3~=1.0.2',
'cryptoconditions>=0.5.0',
'python-rapidjson>=0.0.8',
'python-rapidjson==0.0.8',
'logstats>=0.2.1',
'flask>=0.10.1',
'flask-restful~=0.3.0',


@ -40,7 +40,7 @@ def connection():
# executed to make sure that the replica set is correctly initialized.
# Here we force the connection setup so that all required
# `Database.command` calls are executed before we mock them in the tests.
connection._connect()
connection.connect()
return connection


@ -248,19 +248,6 @@ def test_get_block(signed_create_tx):
assert block_db == block.to_dict()
def test_has_transaction(signed_create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
assert query.has_transaction(conn, signed_create_tx.id)
assert query.has_transaction(conn, 'aaa') is False
def test_count_blocks(signed_create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block


@ -34,6 +34,7 @@ def test_run_a_simple_query():
def test_raise_exception_when_max_tries():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import OperationError
class MockQuery:
def run(self, conn):
@ -41,28 +42,41 @@ def test_raise_exception_when_max_tries():
conn = connect()
with pytest.raises(r.ReqlDriverError):
with pytest.raises(OperationError):
conn.run(MockQuery())
def test_reconnect_when_connection_lost():
def test_reconnect_when_connection_lost(db_host, db_port):
from bigchaindb.backend import connect
def raise_exception(*args, **kwargs):
raise r.ReqlDriverError('mock')
conn = connect()
original_connect = r.connect
r.connect = raise_exception
def delayed_start():
time.sleep(1)
r.connect = original_connect
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
original_connect(host=db_host, port=db_port)
]
thread = Thread(target=delayed_start)
query = r.expr('1')
thread.start()
assert conn.run(query) == '1'
conn = connect()
query = r.expr('1')
assert conn.run(query) == '1'
def test_reconnect_when_connection_lost_tries_n_times():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock')
]
conn = connect(max_tries=3)
query = r.expr('1')
with pytest.raises(ConnectionError):
assert conn.run(query) == '1'
def test_changefeed_reconnects_when_connection_lost(monkeypatch):


@ -29,7 +29,6 @@ def test_schema(schema_func_name, args_qty):
('get_votes_by_block_id', 1),
('write_block', 1),
('get_block', 1),
('has_transaction', 1),
('write_vote', 1),
('get_last_voted_block', 1),
('get_unvoted_blocks', 1),


@ -1,3 +1,5 @@
from argparse import Namespace
import pytest
@ -38,3 +40,13 @@ def mock_bigchaindb_backup_config(monkeypatch):
'backlog_reassign_delay': 5
}
monkeypatch.setattr('bigchaindb._config', config)
@pytest.fixture
def run_start_args(request):
param = getattr(request, 'param', {})
return Namespace(
config=param.get('config'),
start_rethinkdb=param.get('start_rethinkdb', False),
allow_temp_keypair=param.get('allow_temp_keypair', False),
)


@ -45,7 +45,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b):
return {'shards': [{'replicas': [1]}]}
monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica)
args = Namespace(num_shards=3)
args = Namespace(num_shards=3, config=None)
run_set_shards(args)
mock_reconfigure.assert_called_with(replicas=1, shards=3, dry_run=False)
@ -59,8 +59,7 @@ def test_set_shards(mock_reconfigure, monkeypatch, b):
mock_reconfigure.assert_called_with(replicas=3, shards=3, dry_run=False)
@patch('logging.Logger.warn')
def test_set_shards_raises_exception(mock_log, monkeypatch, b):
def test_set_shards_raises_exception(monkeypatch, b):
from bigchaindb.commands.bigchain import run_set_shards
# test that we are correctly catching the exception
@ -73,10 +72,10 @@ def test_set_shards_raises_exception(mock_log, monkeypatch, b):
monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_one_replica)
monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise)
args = Namespace(num_shards=3)
run_set_shards(args)
assert mock_log.called
args = Namespace(num_shards=3, config=None)
with pytest.raises(SystemExit) as exc:
run_set_shards(args)
assert exc.value.args == ('Failed to reconfigure tables.',)
@patch('rethinkdb.ast.Table.reconfigure')
@ -89,7 +88,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b):
return {'shards': [1, 2]}
monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards)
args = Namespace(num_replicas=2)
args = Namespace(num_replicas=2, config=None)
run_set_replicas(args)
mock_reconfigure.assert_called_with(replicas=2, shards=2, dry_run=False)
@ -103,8 +102,7 @@ def test_set_replicas(mock_reconfigure, monkeypatch, b):
mock_reconfigure.assert_called_with(replicas=2, shards=3, dry_run=False)
@patch('logging.Logger.warn')
def test_set_replicas_raises_exception(mock_log, monkeypatch, b):
def test_set_replicas_raises_exception(monkeypatch, b):
from bigchaindb.commands.bigchain import run_set_replicas
# test that we are correctly catching the exception
@ -117,7 +115,7 @@ def test_set_replicas_raises_exception(mock_log, monkeypatch, b):
monkeypatch.setattr(rethinkdb.RqlQuery, 'run', mockreturn_two_shards)
monkeypatch.setattr(rethinkdb.ast.Table, 'reconfigure', mock_raise)
args = Namespace(num_replicas=2)
run_set_replicas(args)
assert mock_log.called
args = Namespace(num_replicas=2, config=None)
with pytest.raises(SystemExit) as exc:
run_set_replicas(args)
assert exc.value.args == ('Failed to reconfigure tables.',)


@ -1,6 +1,6 @@
import json
from unittest.mock import Mock, patch
from argparse import Namespace, ArgumentTypeError
from argparse import Namespace
import copy
import pytest
@ -26,42 +26,6 @@ def test_make_sure_we_dont_remove_any_command():
assert parser.parse_args(['remove-replicas', 'localhost:27017']).command
def test_start_raises_if_command_not_implemented():
from bigchaindb.commands.bigchain import utils
from bigchaindb.commands.bigchain import create_parser
parser = create_parser()
with pytest.raises(NotImplementedError):
# Will raise because `scope`, the third parameter,
# doesn't contain the function `run_start`
utils.start(parser, ['start'], {})
def test_start_raises_if_no_arguments_given():
from bigchaindb.commands.bigchain import utils
from bigchaindb.commands.bigchain import create_parser
parser = create_parser()
with pytest.raises(SystemExit):
utils.start(parser, [], {})
@patch('multiprocessing.cpu_count', return_value=42)
def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count):
from bigchaindb.commands.bigchain import utils
from bigchaindb.commands.bigchain import create_parser
def run_load(args):
return args
parser = create_parser()
assert utils.start(parser, ['load'], {'run_load': run_load}).multiprocess == 1
assert utils.start(parser, ['load', '--multiprocess'], {'run_load': run_load}).multiprocess == 42
@patch('bigchaindb.commands.utils.start')
def test_main_entrypoint(mock_start):
from bigchaindb.commands.bigchain import main
@ -131,9 +95,10 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch):
monkeypatch.setitem(config['keypair'], 'public', 'Charlie_Bucket')
_, _ = capsys.readouterr() # has the effect of clearing capsys
run_export_my_pubkey(args)
out, err = capsys.readouterr()
assert out == config['keypair']['public'] + '\n'
assert out == 'Charlie_Bucket\n'
out, _ = capsys.readouterr()
lines = out.splitlines()
assert config['keypair']['public'] in lines
assert 'Charlie_Bucket' in lines
def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch):
@ -302,6 +267,64 @@ def test_allow_temp_keypair_doesnt_override_if_keypair_found(mock_gen_keypair,
assert bigchaindb.config['keypair']['public'] == original_public_key
def test_run_start_when_db_already_exists(mocker, monkeypatch, run_start_args):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.common.exceptions import DatabaseAlreadyExists
mocked_start = mocker.patch('bigchaindb.processes.start')
def mock_run_init():
raise DatabaseAlreadyExists()
monkeypatch.setattr(
'bigchaindb.commands.bigchain._run_init', mock_run_init)
run_start(run_start_args)
assert mocked_start.called
def test_run_start_when_keypair_not_found(mocker, monkeypatch, run_start_args):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.commands.messages import CANNOT_START_KEYPAIR_NOT_FOUND
from bigchaindb.common.exceptions import KeypairNotFoundException
mocked_start = mocker.patch('bigchaindb.processes.start')
def mock_run_init():
raise KeypairNotFoundException()
monkeypatch.setattr(
'bigchaindb.commands.bigchain._run_init', mock_run_init)
with pytest.raises(SystemExit) as exc:
run_start(run_start_args)
assert len(exc.value.args) == 1
assert exc.value.args[0] == CANNOT_START_KEYPAIR_NOT_FOUND
assert not mocked_start.called
def test_run_start_when_start_rethinkdb_fails(mocker,
monkeypatch,
run_start_args):
from bigchaindb.commands.bigchain import run_start
from bigchaindb.commands.messages import RETHINKDB_STARTUP_ERROR
from bigchaindb.common.exceptions import StartupError
run_start_args.start_rethinkdb = True
mocked_start = mocker.patch('bigchaindb.processes.start')
err_msg = 'Error starting rethinkdb.'
def mock_start_rethinkdb():
raise StartupError(err_msg)
monkeypatch.setattr(
'bigchaindb.commands.utils.start_rethinkdb', mock_start_rethinkdb)
with pytest.raises(SystemExit) as exc:
run_start(run_start_args)
assert len(exc.value.args) == 1
assert exc.value.args[0] == RETHINKDB_STARTUP_ERROR.format(err_msg)
assert not mocked_start.called
@patch('argparse.ArgumentParser.parse_args')
@patch('bigchaindb.commands.utils.base_parser')
@patch('bigchaindb.commands.utils.start')
@ -320,11 +343,6 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
main()
assert argparser_mock.called is True
assert parser.add_argument.called is True
parser.add_argument.assert_any_call('--dev-start-rethinkdb',
dest='start_rethinkdb',
action='store_true',
help='Run RethinkDB on start')
parser.add_subparsers.assert_called_with(title='Commands',
dest='command')
subparsers.add_parser.assert_any_call('configure',
@ -338,11 +356,19 @@ def test_calling_main(start_mock, base_parser_mock, parse_args_mock,
'key')
subparsers.add_parser.assert_any_call('init', help='Init the database')
subparsers.add_parser.assert_any_call('drop', help='Drop the database')
subparsers.add_parser.assert_any_call('start', help='Start BigchainDB')
subsubparsers.add_argument.assert_any_call('--dev-start-rethinkdb',
dest='start_rethinkdb',
action='store_true',
help='Run RethinkDB on start')
subsubparsers.add_argument.assert_any_call('--dev-allow-temp-keypair',
dest='allow_temp_keypair',
action='store_true',
help='Generate a random keypair on start')
subparsers.add_parser.assert_any_call('set-shards',
help='Configure number of shards')
subsubparsers.add_argument.assert_any_call('num_shards',
metavar='num_shards',
type=int, default=1,
@ -395,14 +421,18 @@ def test_run_add_replicas(mock_add_replicas):
mock_add_replicas.reset_mock()
# test add_replicas with `OperationError`
mock_add_replicas.side_effect = OperationError()
assert run_add_replicas(args) is None
mock_add_replicas.side_effect = OperationError('err')
with pytest.raises(SystemExit) as exc:
run_add_replicas(args)
assert exc.value.args == ('err',)
assert mock_add_replicas.call_count == 1
mock_add_replicas.reset_mock()
# test add_replicas with `NotImplementedError`
- mock_add_replicas.side_effect = NotImplementedError()
- assert run_add_replicas(args) is None
+ mock_add_replicas.side_effect = NotImplementedError('err')
+ with pytest.raises(SystemExit) as exc:
+ run_add_replicas(args)
+ assert exc.value.args == ('err',)
assert mock_add_replicas.call_count == 1
mock_add_replicas.reset_mock()
@ -422,29 +452,17 @@ def test_run_remove_replicas(mock_remove_replicas):
mock_remove_replicas.reset_mock()
# test remove_replicas with `OperationError`
- mock_remove_replicas.side_effect = OperationError()
- assert run_remove_replicas(args) is None
+ mock_remove_replicas.side_effect = OperationError('err')
+ with pytest.raises(SystemExit) as exc:
+ run_remove_replicas(args)
+ assert exc.value.args == ('err',)
assert mock_remove_replicas.call_count == 1
mock_remove_replicas.reset_mock()
# test remove_replicas with `NotImplementedError`
- mock_remove_replicas.side_effect = NotImplementedError()
- assert run_remove_replicas(args) is None
+ mock_remove_replicas.side_effect = NotImplementedError('err')
+ with pytest.raises(SystemExit) as exc:
+ run_remove_replicas(args)
+ assert exc.value.args == ('err',)
assert mock_remove_replicas.call_count == 1
mock_remove_replicas.reset_mock()
- def test_mongodb_host_type():
- from bigchaindb.commands.utils import mongodb_host
- # bad port provided
- with pytest.raises(ArgumentTypeError):
- mongodb_host('localhost:11111111111')
- # no port information provided
- with pytest.raises(ArgumentTypeError):
- mongodb_host('localhost')
- # bad host provided
- with pytest.raises(ArgumentTypeError):
- mongodb_host(':27017')

View File

@ -0,0 +1,63 @@
import argparse
import pytest
from unittest.mock import patch
def test_start_raises_if_command_not_implemented():
from bigchaindb.commands import utils
from bigchaindb.commands.bigchain import create_parser
parser = create_parser()
with pytest.raises(NotImplementedError):
# Will raise because `scope`, the third parameter,
# doesn't contain the function `run_start`
utils.start(parser, ['start'], {})
def test_start_raises_if_no_arguments_given():
from bigchaindb.commands import utils
from bigchaindb.commands.bigchain import create_parser
parser = create_parser()
with pytest.raises(SystemExit):
utils.start(parser, [], {})
@patch('multiprocessing.cpu_count', return_value=42)
def test_start_sets_multiprocess_var_based_on_cli_args(mock_cpu_count):
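# Without the flag, `multiprocess` should be normalized to 1; a bare `--multiprocess` should fall back to the CPU count (mocked here to 42).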
from bigchaindb.commands import utils
def run_mp_arg_test(args):
return args
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(title='Commands',
dest='command')
mp_arg_test_parser = subparser.add_parser('mp_arg_test')
mp_arg_test_parser.add_argument('-m', '--multiprocess',
nargs='?',
type=int,
default=False)
scope = {'run_mp_arg_test': run_mp_arg_test}
assert utils.start(parser, ['mp_arg_test'], scope).multiprocess == 1
assert utils.start(parser, ['mp_arg_test', '--multiprocess'], scope).multiprocess == 42
def test_mongodb_host_type():
from bigchaindb.commands.utils import mongodb_host
# bad port provided
with pytest.raises(argparse.ArgumentTypeError):
mongodb_host('localhost:11111111111')
# no port information provided
with pytest.raises(argparse.ArgumentTypeError):
mongodb_host('localhost')
# bad host provided
with pytest.raises(argparse.ArgumentTypeError):
mongodb_host(':27017')

View File

@ -1188,3 +1188,40 @@ def test_transaction_unicode(b):
assert b.get_block(block.id) == block.to_dict()
assert block.validate(b) == block
assert beer_json in serialize(block.to_dict())
@pytest.mark.bdb
def test_is_new_transaction(b, genesis_block):
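# A transaction is 'new' only if it appears in no valid or undecided block; blocks voted invalid do not count, and a block can be ignored via exclude_block_id.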
from bigchaindb.models import Transaction
def write_tx(n):
tx = Transaction.create([b.me], [([b.me], n)])
tx = tx.sign([b.me_private])
# Tx is new because it's not in any block
assert b.is_new_transaction(tx.id)
block = b.create_block([tx])
b.write_block(block)
return tx, block
# test VALID case
tx, block = write_tx(1)
# Tx is now in undecided block
assert not b.is_new_transaction(tx.id)
assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
# After voting valid, should not be new
vote = b.vote(block.id, genesis_block.id, True)
b.write_vote(vote)
assert not b.is_new_transaction(tx.id)
assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
# test INVALID case
tx, block = write_tx(2)
# Tx is now in undecided block
assert not b.is_new_transaction(tx.id)
assert b.is_new_transaction(tx.id, exclude_block_id=block.id)
vote = b.vote(block.id, genesis_block.id, False)
b.write_vote(vote)
# Tx is new because it's only found in an invalid block
assert b.is_new_transaction(tx.id)
assert b.is_new_transaction(tx.id, exclude_block_id=block.id)

View File

@ -629,3 +629,17 @@ def test_start(mock_start, b):
from bigchaindb.pipelines import vote
vote.start()
mock_start.assert_called_with()
@pytest.mark.genesis
def test_vote_no_double_inclusion(b):
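# Once a transaction has been written in one block, the same transaction appearing in a different block must be voted invalid.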
from bigchaindb.pipelines import vote
tx = dummy_tx(b)
block = b.create_block([tx])
r = vote.Vote().validate_tx(tx, block.id, 1)
assert r == (True, block.id, 1)
b.write_block(block)
r = vote.Vote().validate_tx(tx, 'other_block_id', 1)
assert r == (False, 'other_block_id', 1)

View File

@ -169,12 +169,17 @@ def test_autoconfigure_read_both_from_file_and_env(monkeypatch, request):
'host': DATABASE_HOST,
'port': DATABASE_PORT,
'name': DATABASE_NAME,
'connection_timeout': 5000,
'max_tries': 3
}
database_mongodb = {
'backend': 'mongodb',
'host': DATABASE_HOST,
'port': DATABASE_PORT,
'name': DATABASE_NAME,
'connection_timeout': 5000,
'max_tries': 3,
'replicaset': 'bigchain-rs',
}

View File

@ -10,6 +10,8 @@ def config(request, monkeypatch):
'port': 28015,
'name': 'bigchain',
'replicaset': 'bigchain-rs',
'connection_timeout': 5000,
'max_tries': 3
},
'keypair': {
'public': 'pubkey',
@ -87,3 +89,13 @@ def test_transaction_exists(monkeypatch, exists):
'bigchaindb.backend.query.has_transaction', lambda x, y: exists)
bigchain = Bigchain(public_key='pubkey', private_key='privkey')
assert bigchain.transaction_exists('txid') is exists
def test_has_previous_vote(monkeypatch):
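# If a vote in the block fails signature verification (patched here to always fail), has_previous_vote should raise.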
from bigchaindb.core import Bigchain
monkeypatch.setattr(
'bigchaindb.utils.verify_vote_signature', lambda voters, vote: False)
bigchain = Bigchain(public_key='pubkey', private_key='privkey')
block = {'votes': ({'node_pubkey': 'pubkey'},)}
with pytest.raises(Exception):
bigchain.has_previous_vote(block)

View File

@ -49,6 +49,9 @@ def test_count_votes():
'counts': {
'n_valid': 9, # 9 kosher votes
'n_invalid': 4, # 1 cheat, 1 invalid, 1 malformed, 1 rogue prev block
# One of the cheat votes counts towards n_invalid, the other is
# not counted here.
# len(cheat) + n_valid + n_invalid == len(votes)
},
'cheat': [votes[:2]],
'malformed': [votes[3]],
@ -83,18 +86,15 @@ def test_must_agree_prev_block():
# Tests for vote decision making
- DECISION_TESTS = [dict(
- zip(['n_voters', 'n_valid', 'n_invalid'], t))
- for t in [
- (1, 1, 1),
- (2, 2, 1),
- (3, 2, 2),
- (4, 3, 2),
- (5, 3, 3),
- (6, 4, 3),
- (7, 4, 4),
- (8, 5, 4),
- ]
+ DECISION_TESTS = [
+ {'n_voters': 1, 'n_valid': 1, 'n_invalid': 1},
+ {'n_voters': 2, 'n_valid': 2, 'n_invalid': 1},
+ {'n_voters': 3, 'n_valid': 2, 'n_invalid': 2},
+ {'n_voters': 4, 'n_valid': 3, 'n_invalid': 2},
+ {'n_voters': 5, 'n_valid': 3, 'n_invalid': 3},
+ {'n_voters': 6, 'n_valid': 4, 'n_invalid': 3},
+ {'n_voters': 7, 'n_valid': 4, 'n_invalid': 4},
+ {'n_voters': 8, 'n_valid': 5, 'n_invalid': 4}
]
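# In each case above, n_valid is the smallest strict majority of n_voters (n_voters // 2 + 1) and n_invalid is the smallest count that rules such a majority out ((n_voters + 1) // 2).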

View File

@ -47,3 +47,68 @@ def test_get_outputs_endpoint_with_invalid_unspent(client, user_pk):
res = client.get(OUTPUTS_ENDPOINT + params)
assert expected == res.json
assert res.status_code == 400
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_divisble_transactions_returns_500(b, client):
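# Regression check: after several transfers of a divisible asset, the transactions and outputs endpoints should answer with 200 rather than a 500.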
from bigchaindb.models import Transaction
from bigchaindb.common import crypto
import json
TX_ENDPOINT = '/api/v1/transactions'
def mine(tx_list):
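# helper: write the given transactions in a block and vote that block valid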
block = b.create_block(tx_list)
b.write_block(block)
# vote the block valid
vote = b.vote(block.id, b.get_last_voted_block().id, True)
b.write_vote(vote)
alice_priv, alice_pub = crypto.generate_key_pair()
bob_priv, bob_pub = crypto.generate_key_pair()
carly_priv, carly_pub = crypto.generate_key_pair()
create_tx = Transaction.create([alice_pub], [([alice_pub], 4)])
create_tx.sign([alice_priv])
res = client.post(TX_ENDPOINT, data=json.dumps(create_tx.to_dict()))
assert res.status_code == 202
mine([create_tx])
transfer_tx = Transaction.transfer(create_tx.to_inputs(),
[([alice_pub], 3), ([bob_pub], 1)],
asset_id=create_tx.id)
transfer_tx.sign([alice_priv])
res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx.to_dict()))
assert res.status_code == 202
mine([transfer_tx])
transfer_tx_carly = Transaction.transfer([transfer_tx.to_inputs()[1]],
[([carly_pub], 1)],
asset_id=create_tx.id)
transfer_tx_carly.sign([bob_priv])
res = client.post(TX_ENDPOINT, data=json.dumps(transfer_tx_carly.to_dict()))
assert res.status_code == 202
mine([transfer_tx_carly])
asset_id = create_tx.id
url = TX_ENDPOINT + "?asset_id=" + asset_id
assert client.get(url).status_code == 200
assert len(client.get(url).json) == 3
url = OUTPUTS_ENDPOINT + '?public_key=' + alice_pub
assert client.get(url).status_code == 200
url = OUTPUTS_ENDPOINT + '?public_key=' + bob_pub
assert client.get(url).status_code == 200
url = OUTPUTS_ENDPOINT + '?public_key=' + carly_pub
assert client.get(url).status_code == 200