
Remove unsupported backends (#2289)

* Problem: RethinkDB, the change feed, the old MongoDB backend, and the admin interface are no longer supported.

Solution: Remove the unsupported functionality. Move the MongoDB backend implementation entirely into the localmongodb package. Fix the test setup.

* Problem: Nothing depends on multipipes any longer.

Solution: Remove multipipes from setup.py.

* Problem: The how-to-run-tests doc uses --database-backend.

Solution: Drop the --database-backend option from the documented pytest usage.

* Problem: The backends docs are outdated.

Solution: Document MongoDB as the default and only backend for BigchainDB.

* Problem: The inputs fixtures use the old blocks API.

Solution: Change the inputs fixtures to use the new blocks API.

* Problem: The rethinkdb package is no longer used.

Solution: Remove the rethinkdb dependency from setup.py.

* Problem: The abci-marked tests use an outdated Mongo connection.

Solution: Replace MongoDBConnection with LocalMongoDBConnection for them.
Lev Berman 2018-05-23 11:34:00 +02:00 committed by Troy McConaghy
parent 5403f7bfcc
commit 89b28b8471
53 changed files with 487 additions and 4553 deletions

View File

@@ -2,13 +2,12 @@
## Structure
- [`changefeed.py`](./changefeed.py): Changefeed-related interfaces
- [`connection.py`](./connection.py): Database connection-related interfaces
- [`query.py`](./query.py): Database query-related interfaces, dispatched through single-dispatch
- [`schema.py`](./schema.py): Database setup and schema-related interfaces, dispatched through
single-dispatch
Built-in implementations (e.g. [RethinkDB's](./rethinkdb)) are provided in sub-directories and
Built-in implementations (e.g. [MongoDB's](./localmongodb)) are provided in sub-directories and
have their connection type's location exposed as `BACKENDS` in [`connection.py`](./connection.py).
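For illustration, a dotted path like those in `BACKENDS` can be resolved to a connection class with `importlib`. This loader is a hypothetical sketch, not the project's actual resolution code:

```python
import importlib

def load_connection_class(dotted_path):
    """Split 'package.module.ClassName' into module and class, then import it."""
    module_path, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# e.g. (requires bigchaindb to be installed):
# cls = load_connection_class(
#     'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection')
```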
## Single-Dispatched Interfaces

View File

@@ -7,7 +7,6 @@ configuration or the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable.
"""
# Include the backend interfaces
from bigchaindb.backend import admin, changefeed, schema, query # noqa
from bigchaindb.backend import schema, query # noqa
from bigchaindb.backend.connection import connect # noqa
from bigchaindb.backend.changefeed import get_changefeed # noqa

View File

@@ -1,34 +0,0 @@
"""Database configuration functions."""
from functools import singledispatch
@singledispatch
def get_config(connection, *, table):
raise NotImplementedError
@singledispatch
def reconfigure(connection, *, table, shards, replicas, **kwargs):
raise NotImplementedError
@singledispatch
def set_shards(connection, *, shards):
raise NotImplementedError
@singledispatch
def set_replicas(connection, *, replicas):
raise NotImplementedError
@singledispatch
def add_replicas(connection, replicas):
raise NotImplementedError('This command is specific to the '
'MongoDB backend.')
@singledispatch
def remove_replicas(connection, replicas):
raise NotImplementedError('This command is specific to the '
'MongoDB backend.')
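These stubs follow the standard `functools.singledispatch` pattern: the generic function raises `NotImplementedError`, and each backend registers an override keyed on its connection type. A minimal, self-contained sketch with a toy connection class (not the real backend types):

```python
from functools import singledispatch

@singledispatch
def get_config(connection, *, table):
    raise NotImplementedError

class ToyConnection:  # stand-in for a backend's connection class
    pass

@get_config.register(ToyConnection)
def _get_config_toy(connection, *, table):
    return {'table': table, 'shards': 1}

print(get_config(ToyConnection(), table='bigchain'))  # {'table': 'bigchain', 'shards': 1}
```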

View File

@@ -1,90 +0,0 @@
"""Changefeed interfaces for backends."""
from functools import singledispatch
from multipipes import Node
import bigchaindb
class ChangeFeed(Node):
"""Create a new changefeed.
It extends :class:`multipipes.Node` to make it pluggable in other
Pipelines instances, and makes use of ``self.outqueue`` to output
the data.
A changefeed is a real time feed on inserts, updates, and deletes, and
is volatile. This class is a helper to create changefeeds. Moreover,
it provides a way to specify a ``prefeed`` of iterable data to output
before the actual changefeed.
"""
INSERT = 1
DELETE = 2
UPDATE = 4
def __init__(self, table, operation, *, prefeed=None, connection=None):
"""Create a new ChangeFeed.
Args:
table (str): name of the table to listen to for changes.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operations is possible
with the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (:class:`~collections.abc.Iterable`, optional): whatever
set of data you want to be published first.
connection (:class:`~bigchaindb.backend.connection.Connection`, optional): # noqa
A connection to the database. If no connection is provided a
default connection will be created.
"""
super().__init__(name='changefeed')
self.prefeed = prefeed if prefeed else []
self.table = table
self.operation = operation
if connection:
self.connection = connection
else:
self.connection = bigchaindb.backend.connect(
**bigchaindb.config['database'])
def run_forever(self):
"""Main loop of the ``multipipes.Node``
This method is responsible for first feeding the prefeed to the
outqueue and after that starting the changefeed and recovering from any
errors that may occur in the backend.
"""
raise NotImplementedError
def run_changefeed(self):
"""Backend specific method to run the changefeed.
The changefeed is usually a backend cursor that is not closed when all
the results are exhausted. Instead it remains open waiting for new
results.
This method should also filter each result based on the ``operation``
and put all matching results on the outqueue of ``multipipes.Node``.
"""
raise NotImplementedError
@singledispatch
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a ChangeFeed.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): name of the table to listen to for changes.
operation (int): can be ChangeFeed.INSERT, ChangeFeed.DELETE, or
ChangeFeed.UPDATE. Combining multiple operations is possible
with the bitwise ``|`` operator
(e.g. ``ChangeFeed.INSERT | ChangeFeed.UPDATE``)
prefeed (iterable): whatever set of data you want to be published
first.
"""
raise NotImplementedError
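Because the three operations are distinct bits, any subset fits into one integer and membership is a bitwise AND. A quick sketch of that filtering logic:

```python
INSERT, DELETE, UPDATE = 1, 2, 4

operation = INSERT | UPDATE  # listen for inserts and updates only

events = [(INSERT, 'tx1'), (DELETE, 'tx2'), (UPDATE, 'tx3')]
for op, payload in events:
    if op & operation:  # non-zero iff `op` is one of the requested kinds
        print('emit', payload)
# emits tx1 and tx3; the delete is filtered out
```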

View File

@@ -9,8 +9,6 @@ from bigchaindb.backend.exceptions import ConnectionError
BACKENDS = {
'localmongodb': 'bigchaindb.backend.localmongodb.connection.LocalMongoDBConnection',
'mongodb': 'bigchaindb.backend.mongodb.connection.MongoDBConnection',
'rethinkdb': 'bigchaindb.backend.rethinkdb.connection.RethinkDBConnection'
}
logger = logging.getLogger(__name__)
@@ -54,6 +52,10 @@ def connect(backend=None, host=None, port=None, name=None, max_tries=None,
# I added **kwargs for both RethinkDBConnection and MongoDBConnection
# to handle these additional args. In case of RethinkDBConnection
# it just does not do anything with it.
#
# UPD: RethinkDBConnection is not here anymore because we no longer support RethinkDB.
# The problem described above might be reconsidered next time we introduce a backend,
# if it ever happens.
replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
login = login or bigchaindb.config['database'].get('login')
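Note the asymmetry in these fallbacks: `ssl` is tested with `is not None` because an explicit `False` is a meaningful setting, whereas an `or` fallback would silently discard it. A tiny illustration of the pitfall:

```python
config_default = True          # pretend the config file enables SSL

ssl_arg = False                # the caller explicitly disabled SSL
wrong = ssl_arg or config_default                           # True -- explicit False is lost
right = ssl_arg if ssl_arg is not None else config_default  # False, as intended
print(wrong, right)
```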

View File

@@ -1,13 +1,15 @@
"""MongoDB backend implementation.
Contains a MongoDB-specific implementation of the
:mod:`~bigchaindb.backend.schema` interface.
:mod:`~bigchaindb.backend.schema` and :mod:`~bigchaindb.backend.query` interfaces.
You can specify BigchainDB to use MongoDB as its database backend by either
setting ``database.backend`` to ``'localmongodb'`` in your configuration file, or
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'localmongodb'``.
MongoDB is the default database backend for BigchainDB.
If configured to use MongoDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.localmongodb.LocalMongoDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the

View File

@@ -1,5 +1,268 @@
from bigchaindb.backend.mongodb.connection import MongoDBConnection
import time
import logging
from ssl import CERT_REQUIRED
import pymongo
import bigchaindb
from bigchaindb.utils import Lazy
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import (DuplicateKeyError,
OperationError,
ConnectionError)
from bigchaindb.backend.connection import Connection
logger = logging.getLogger(__name__)
class LocalMongoDBConnection(MongoDBConnection):
pass
class LocalMongoDBConnection(Connection):
def __init__(self, replicaset=None, ssl=None, login=None, password=None,
ca_cert=None, certfile=None, keyfile=None,
keyfile_passphrase=None, crlfile=None, **kwargs):
"""Create a new Connection instance.
Args:
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
super().__init__(**kwargs)
self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
self.login = login or bigchaindb.config['database'].get('login')
self.password = password or bigchaindb.config['database'].get('password')
self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)
self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
@property
def db(self):
return self.conn[self.dbname]
def query(self):
return Lazy()
def collection(self, name):
"""Return a lazy object that can be used to compose a query.
Args:
name (str): the name of the collection to query.
"""
return self.query()[self.dbname][name]
def run(self, query):
try:
try:
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
logger.warning('Lost connection to the database, '
'retrying query.')
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
raise ConnectionError from exc
except pymongo.errors.DuplicateKeyError as exc:
raise DuplicateKeyError from exc
except pymongo.errors.OperationFailure as exc:
print(f'DETAILS: {exc.details}')
raise OperationError from exc
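A hedged usage sketch of the lazy-query style above, assuming a reachable MongoDB, a loaded BigchainDB configuration, and a made-up transaction id; `collection()` only records the chained calls, and `run()` replays them on the real pymongo client:

```python
conn = LocalMongoDBConnection()
asset = conn.run(
    conn.collection('assets')
    .find_one({'id': 'some-transaction-id'}, projection={'_id': False}))
```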
def _connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is an OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
"""
try:
if self.replicaset:
# we should only return a connection if the replica set is
# initialized. initialize_replica_set will check if the
# replica set is initialized else it will initialize it.
initialize_replica_set(self.host,
self.port,
self.connection_timeout,
self.dbname,
self.ssl,
self.login,
self.password,
self.ca_cert,
self.certfile,
self.keyfile,
self.keyfile_passphrase,
self.crlfile)
# FYI: the connection process might raise a
# `ServerSelectionTimeoutError`, that is a subclass of
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
# `initialize_replica_set` might raise `ConnectionFailure`,
# `OperationFailure` or `ConfigurationError`.
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
MONGO_OPTS = {
'socketTimeoutMS': 20000,
}
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
password, ca_cert, certfile, keyfile,
keyfile_passphrase, crlfile):
"""Initialize a replica set. If already initialized skip."""
# Setup a MongoDB connection
# The reason we do this instead of `backend.connect` is that
# `backend.connect` will connect you to a replica set but this fails if
# you try to connect to a replica set that is not yet initialized
try:
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if ca_cert is None or certfile is None or keyfile is None or \
crlfile is None:
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
**MONGO_OPTS)
if login is not None and password is not None:
conn[dbname].authenticate(login, password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
ssl_ca_certs=ca_cert,
ssl_certfile=certfile,
ssl_keyfile=keyfile,
ssl_pem_passphrase=keyfile_passphrase,
ssl_crlfile=crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if login is not None:
logger.info('Authenticating to the database...')
conn[dbname].authenticate(login, mechanism='MONGODB-X509')
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
_check_replica_set(conn)
host = '{}:{}'.format(bigchaindb.config['database']['host'],
bigchaindb.config['database']['port'])
config = {'_id': bigchaindb.config['database']['replicaset'],
'members': [{'_id': 0, 'host': host}]}
try:
conn.admin.command('replSetInitiate', config)
except pymongo.errors.OperationFailure as exc_info:
if exc_info.details['codeName'] == 'AlreadyInitialized':
return
raise
else:
_wait_for_replica_set_initialization(conn)
logger.info('Initialized replica set')
finally:
if conn is not None:
logger.info('Closing initial connection to MongoDB')
conn.close()
def _check_replica_set(conn):
"""Checks if the replSet option was enabled either through the command
line option or config file and if it matches the one provided by
bigchaindb configuration.
Note:
The setting we are looking for will have a different name depending
if it was set by the config file (`replSetName`) or by command
line arguments (`replSet`).
Raises:
:exc:`~ConfigurationError`: If mongod was not started with the
replSet option.
"""
options = conn.admin.command('getCmdLineOpts')
try:
repl_opts = options['parsed']['replication']
repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
except KeyError:
raise ConfigurationError('mongod was not started with'
' the replSet option.')
bdb_repl_set_name = bigchaindb.config['database']['replicaset']
if repl_set_name != bdb_repl_set_name:
raise ConfigurationError('The replicaset configuration of '
'bigchaindb (`{}`) needs to match '
'the replica set name from MongoDB'
' (`{}`)'.format(bdb_repl_set_name,
repl_set_name))
def _wait_for_replica_set_initialization(conn):
"""Wait for a replica set to finish initialization.
If a replica set is being initialized for the first time it takes some
time. Nodes need to discover each other and an election needs to take
place. During this time the database is not writable so we need to wait
before continuing with the rest of the initialization
"""
# I did not find a better way to do this for now.
# To check if the database is ready we will poll the mongodb logs until
# we find the line that says the database is ready
logger.info('Waiting for mongodb replica set initialization')
while True:
logs = conn.admin.command('getLog', 'rs')['log']
if any('database writes are now permitted' in line for line in logs):
return
time.sleep(0.1)
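Outside BigchainDB, the same single-node bootstrap can be reproduced with plain pymongo; a minimal sketch, assuming mongod was started with `--replSet bigchain-rs` on localhost:

```python
import pymongo

client = pymongo.MongoClient('localhost', 27017)
try:
    client.admin.command('replSetInitiate',
                         {'_id': 'bigchain-rs',
                          'members': [{'_id': 0, 'host': 'localhost:27017'}]})
except pymongo.errors.OperationFailure as exc:
    # mirror the AlreadyInitialized handling above
    if exc.details['codeName'] != 'AlreadyInitialized':
        raise
```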

View File

@@ -8,7 +8,6 @@ from bigchaindb.common.exceptions import MultipleValidatorOperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
from bigchaindb.common.transaction import Transaction
from bigchaindb.backend import mongodb
from bigchaindb.backend.query import VALIDATOR_UPDATE_ID
register_query = module_dispatch_registrar(backend.query)
@@ -90,6 +89,14 @@ def get_asset(conn, asset_id):
pass
@register_query(LocalMongoDBConnection)
def get_assets(conn, asset_ids):
return conn.run(
conn.collection('assets')
.find({'id': {'$in': asset_ids}},
projection={'_id': False}))
@register_query(LocalMongoDBConnection)
def get_spent(conn, transaction_id, output):
return conn.run(
@@ -144,8 +151,28 @@ def get_txids_filtered(conn, asset_id, operation=None):
@register_query(LocalMongoDBConnection)
def text_search(*args, **kwargs):
return mongodb.query.text_search(*args, **kwargs)
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table='assets'):
cursor = conn.run(
conn.collection(table)
.find({'$text': {
'$search': search,
'$language': language,
'$caseSensitive': case_sensitive,
'$diacriticSensitive': diacritic_sensitive}},
{'score': {'$meta': 'textScore'}, '_id': False})
.sort([('score', {'$meta': 'textScore'})])
.limit(limit))
if text_score:
return cursor
return (_remove_text_score(obj) for obj in cursor)
def _remove_text_score(asset):
asset.pop('score', None)
return asset
@register_query(LocalMongoDBConnection)
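A hypothetical call of the new `text_search`, assuming a connection `conn` and the full-text index that the schema module creates on the `assets` collection:

```python
# With text_score=True the Mongo-computed relevance score is kept on each hit.
for asset in text_search(conn, 'bicycle', limit=5, text_score=True):
    print(asset['score'], asset)
```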

View File

@@ -1,22 +0,0 @@
"""MongoDB backend implementation.
Contains a MongoDB-specific implementation of the
:mod:`~bigchaindb.backend.changefeed`, :mod:`~bigchaindb.backend.query`, and
:mod:`~bigchaindb.backend.schema` interfaces.
You can specify BigchainDB to use MongoDB as its database backend by either
setting ``database.backend`` to ``'rethinkdb'`` in your configuration file, or
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'rethinkdb'``.
If configured to use MongoDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.rethinkdb.MongoDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the
generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
from bigchaindb.backend.mongodb import admin, schema, query, changefeed # noqa
# MongoDBConnection should always be accessed via
# ``bigchaindb.backend.connect()``.

View File

@@ -1,86 +0,0 @@
"""Database configuration functions."""
import logging
from pymongo.errors import OperationFailure
from bigchaindb.backend import admin
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.mongodb.connection import MongoDBConnection
logger = logging.getLogger(__name__)
register_admin = module_dispatch_registrar(admin)
@register_admin(MongoDBConnection)
def add_replicas(connection, replicas):
"""Add a set of replicas to the replicaset
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (:obj:`list` of :obj:`str`): replica addresses in the
form "hostname:port".
Raises:
OperationError: If the reconfiguration fails due to a MongoDB
:exc:`OperationFailure`
"""
# get current configuration
conf = connection.conn.admin.command('replSetGetConfig')
# MongoDB does not automatically add an id for the members so we need
# to choose one that does not exist yet. The safest way is to use
# incrementing ids, so we first check what is the highest id already in
# the set and continue from there.
cur_id = max([member['_id'] for member in conf['config']['members']])
# add the nodes to the members list of the replica set
for replica in replicas:
cur_id += 1
conf['config']['members'].append({'_id': cur_id, 'host': replica})
# increase the configuration version number
# when reconfiguring, mongodb expects a version number higher than the one
# it currently has
conf['config']['version'] += 1
# apply new configuration
try:
connection.conn.admin.command('replSetReconfig', conf['config'])
except OperationFailure as exc:
raise OperationError(exc.details['errmsg'])
@register_admin(MongoDBConnection)
def remove_replicas(connection, replicas):
"""Remove a set of replicas from the replicaset
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (:obj:`list` of :obj:`str`): replica addresses in the
form "hostname:port".
Raises:
OperationError: If the reconfiguration fails due to a MongoDB
:exc:`OperationFailure`
"""
# get the current configuration
conf = connection.conn.admin.command('replSetGetConfig')
# remove the nodes from the members list in the replica set
conf['config']['members'] = list(
filter(lambda member: member['host'] not in replicas,
conf['config']['members'])
)
# increase the configuration version number
conf['config']['version'] += 1
# apply new configuration
try:
connection.conn.admin.command('replSetReconfig', conf['config'])
except OperationFailure as exc:
raise OperationError(exc.details['errmsg'])
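A hedged usage sketch of these admin helpers, assuming `connection` is an authenticated `MongoDBConnection` and the hostnames are placeholders:

```python
# Grow the replica set by two members, then drop one again.
add_replicas(connection, ['mdb-node-2:27017', 'mdb-node-3:27017'])
remove_replicas(connection, ['mdb-node-3:27017'])
```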

View File

@@ -1,112 +0,0 @@
import logging
import time
import pymongo
from bigchaindb import backend
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.exceptions import BackendError
logger = logging.getLogger(__name__)
register_changefeed = module_dispatch_registrar(backend.changefeed)
class MongoDBChangeFeed(ChangeFeed):
"""This class implements a MongoDB changefeed as a multipipes Node.
We emulate the behaviour of the RethinkDB changefeed by using a tailable
cursor that listens for events on the oplog.
"""
def run_forever(self):
for element in self.prefeed:
self.outqueue.put(element)
table = self.table
dbname = self.connection.dbname
# last timestamp in the oplog. We only care for operations happening
# in the future.
last_ts = self.connection.run(
self.connection.query().local.oplog.rs.find()
.sort('$natural', pymongo.DESCENDING).limit(1)
.next()['ts'])
for record in run_changefeed(self.connection, table, last_ts):
is_insert = record['op'] == 'i'
is_delete = record['op'] == 'd'
is_update = record['op'] == 'u'
# mongodb documents use the `_id` for the primary key.
# We are not using this field at this point and we need to
# remove it to prevent problems with schema validation.
# See https://github.com/bigchaindb/bigchaindb/issues/992
if is_insert and (self.operation & ChangeFeed.INSERT):
record['o'].pop('_id', None)
self.outqueue.put(record['o'])
elif is_delete and (self.operation & ChangeFeed.DELETE):
# on delete it only returns the id of the document
self.outqueue.put(record['o'])
elif is_update and (self.operation & ChangeFeed.UPDATE):
# the oplog entry for updates only returns the update
# operations to apply to the document and not the
# document itself. So here we first read the document
# and then return it.
doc = self.connection.conn[dbname][table].find_one(
{'_id': record['o2']['_id']},
{'_id': False}
)
self.outqueue.put(doc)
logger.debug('Record in changefeed: %s:%s', table, record['op'])
@register_changefeed(MongoDBConnection)
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a MongoDB changefeed.
Returns:
An instance of
:class:`~bigchaindb.backend.mongodb.MongoDBChangeFeed`.
"""
return MongoDBChangeFeed(table, operation, prefeed=prefeed,
connection=connection)
_FEED_STOP = False
"""If it's True then the changefeed will return when there are no more items.
"""
def run_changefeed(conn, table, last_ts):
"""Encapsulate operational logic of tailing changefeed from MongoDB
"""
while True:
try:
# XXX: hack to force reconnection, in case the connection
# is lost while waiting on the cursor. See #1154.
conn._conn = None
namespace = conn.dbname + '.' + table
query = conn.query().local.oplog.rs.find(
{'ns': namespace, 'ts': {'$gt': last_ts}},
{'o._id': False},
cursor_type=pymongo.CursorType.TAILABLE_AWAIT
)
cursor = conn.run(query)
logging.debug('Tailing oplog at %s/%s', namespace, last_ts)
while cursor.alive:
try:
record = cursor.next()
yield record
last_ts = record['ts']
except StopIteration:
if _FEED_STOP:
return
except (BackendError, pymongo.errors.ConnectionFailure):
logger.exception('Lost connection while tailing oplog, retrying')
time.sleep(1)
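Stripped of the BigchainDB plumbing, the tailing trick reduces to a `TAILABLE_AWAIT` cursor on the oplog. A minimal pymongo sketch, assuming a local replica-set member:

```python
import pymongo

client = pymongo.MongoClient()
oplog = client.local.oplog.rs

# Newest oplog timestamp: only events after this point are of interest.
last_ts = oplog.find().sort('$natural', pymongo.DESCENDING).limit(1).next()['ts']

cursor = oplog.find({'ts': {'$gt': last_ts}},
                    cursor_type=pymongo.CursorType.TAILABLE_AWAIT)
while cursor.alive:
    try:
        record = cursor.next()
        print(record['op'], record['ns'])
    except StopIteration:
        continue  # nothing new yet; the await cursor will block again
```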

View File

@@ -1,267 +0,0 @@
import time
import logging
from ssl import CERT_REQUIRED
import pymongo
import bigchaindb
from bigchaindb.utils import Lazy
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend.exceptions import (DuplicateKeyError,
OperationError,
ConnectionError)
from bigchaindb.backend.connection import Connection
logger = logging.getLogger(__name__)
class MongoDBConnection(Connection):
def __init__(self, replicaset=None, ssl=None, login=None, password=None,
ca_cert=None, certfile=None, keyfile=None,
keyfile_passphrase=None, crlfile=None, **kwargs):
"""Create a new Connection instance.
Args:
replicaset (str, optional): the name of the replica set to
connect to.
**kwargs: arbitrary keyword arguments provided by the
configuration's ``database`` settings
"""
super().__init__(**kwargs)
self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')
self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)
self.login = login or bigchaindb.config['database'].get('login')
self.password = password or bigchaindb.config['database'].get('password')
self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)
self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)
self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)
self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)
self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)
@property
def db(self):
return self.conn[self.dbname]
def query(self):
return Lazy()
def collection(self, name):
"""Return a lazy object that can be used to compose a query.
Args:
name (str): the name of the collection to query.
"""
return self.query()[self.dbname][name]
def run(self, query):
try:
try:
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
logger.warning('Lost connection to the database, '
'retrying query.')
return query.run(self.conn)
except pymongo.errors.AutoReconnect as exc:
raise ConnectionError from exc
except pymongo.errors.DuplicateKeyError as exc:
raise DuplicateKeyError from exc
except pymongo.errors.OperationFailure as exc:
raise OperationError from exc
def _connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is an OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
"""
try:
if self.replicaset:
# we should only return a connection if the replica set is
# initialized. initialize_replica_set will check if the
# replica set is initialized else it will initialize it.
initialize_replica_set(self.host,
self.port,
self.connection_timeout,
self.dbname,
self.ssl,
self.login,
self.password,
self.ca_cert,
self.certfile,
self.keyfile,
self.keyfile_passphrase,
self.crlfile)
# FYI: the connection process might raise a
# `ServerSelectionTimeoutError`, that is a subclass of
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
# `initialize_replica_set` might raise `ConnectionFailure`,
# `OperationFailure` or `ConfigurationError`.
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
MONGO_OPTS = {
'socketTimeoutMS': 20000,
}
def initialize_replica_set(host, port, connection_timeout, dbname, ssl, login,
password, ca_cert, certfile, keyfile,
keyfile_passphrase, crlfile):
"""Initialize a replica set. If already initialized skip."""
# Setup a MongoDB connection
# The reason we do this instead of `backend.connect` is that
# `backend.connect` will connect you to a replica set but this fails if
# you try to connect to a replica set that is not yet initialized
try:
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if ca_cert is None or certfile is None or keyfile is None or \
crlfile is None:
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
**MONGO_OPTS)
if login is not None and password is not None:
conn[dbname].authenticate(login, password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
conn = pymongo.MongoClient(host,
port,
serverselectiontimeoutms=connection_timeout,
ssl=ssl,
ssl_ca_certs=ca_cert,
ssl_certfile=certfile,
ssl_keyfile=keyfile,
ssl_pem_passphrase=keyfile_passphrase,
ssl_crlfile=crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if login is not None:
logger.info('Authenticating to the database...')
conn[dbname].authenticate(login, mechanism='MONGODB-X509')
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc
_check_replica_set(conn)
host = '{}:{}'.format(bigchaindb.config['database']['host'],
bigchaindb.config['database']['port'])
config = {'_id': bigchaindb.config['database']['replicaset'],
'members': [{'_id': 0, 'host': host}]}
try:
conn.admin.command('replSetInitiate', config)
except pymongo.errors.OperationFailure as exc_info:
if exc_info.details['codeName'] == 'AlreadyInitialized':
return
raise
else:
_wait_for_replica_set_initialization(conn)
logger.info('Initialized replica set')
finally:
if conn is not None:
logger.info('Closing initial connection to MongoDB')
conn.close()
def _check_replica_set(conn):
"""Checks if the replSet option was enabled either through the command
line option or config file and if it matches the one provided by
bigchaindb configuration.
Note:
The setting we are looking for will have a different name depending
if it was set by the config file (`replSetName`) or by command
line arguments (`replSet`).
Raises:
:exc:`~ConfigurationError`: If mongod was not started with the
replSet option.
"""
options = conn.admin.command('getCmdLineOpts')
try:
repl_opts = options['parsed']['replication']
repl_set_name = repl_opts.get('replSetName', repl_opts.get('replSet'))
except KeyError:
raise ConfigurationError('mongod was not started with'
' the replSet option.')
bdb_repl_set_name = bigchaindb.config['database']['replicaset']
if repl_set_name != bdb_repl_set_name:
raise ConfigurationError('The replicaset configuration of '
'bigchaindb (`{}`) needs to match '
'the replica set name from MongoDB'
' (`{}`)'.format(bdb_repl_set_name,
repl_set_name))
def _wait_for_replica_set_initialization(conn):
"""Wait for a replica set to finish initialization.
If a replica set is being initialized for the first time it takes some
time. Nodes need to discover each other and an election needs to take
place. During this time the database is not writable so we need to wait
before continuing with the rest of the initialization
"""
# I did not find a better way to do this for now.
# To check if the database is ready we will poll the mongodb logs until
# we find the line that says the database is ready
logger.info('Waiting for mongodb replica set initialization')
while True:
logs = conn.admin.command('getLog', 'rs')['log']
if any('database writes are now permitted' in line for line in logs):
return
time.sleep(0.1)

View File

@@ -1,389 +0,0 @@
"""Query implementation for MongoDB"""
from time import time
from pymongo import ReturnDocument
from bigchaindb import backend
from bigchaindb.backend.mongodb.changefeed import run_changefeed
from bigchaindb.common.exceptions import CyclicBlockchainError
from bigchaindb.common.transaction import Transaction
from bigchaindb.backend.exceptions import DuplicateKeyError, OperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
register_query = module_dispatch_registrar(backend.query)
@register_query(MongoDBConnection)
def write_transaction(conn, signed_transaction):
try:
return conn.run(
conn.collection('backlog')
.insert_one(signed_transaction))
except DuplicateKeyError:
return
@register_query(MongoDBConnection)
def update_transaction(conn, transaction_id, doc):
# with mongodb we need to add update operators to the doc
doc = {'$set': doc}
return conn.run(
conn.collection('backlog')
.find_one_and_update(
{'id': transaction_id},
doc,
return_document=ReturnDocument.AFTER))
@register_query(MongoDBConnection)
def delete_transaction(conn, *transaction_id):
return conn.run(
conn.collection('backlog')
.delete_many({'id': {'$in': transaction_id}}))
@register_query(MongoDBConnection)
def get_stale_transactions(conn, reassign_delay):
return conn.run(
conn.collection('backlog')
.find({'assignment_timestamp': {'$lt': time() - reassign_delay}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_transaction_from_block(conn, transaction_id, block_id):
try:
return conn.run(
conn.collection('bigchain')
.aggregate([
{'$match': {'id': block_id}},
{'$project': {
'block.transactions': {
'$filter': {
'input': '$block.transactions',
'as': 'transaction',
'cond': {
'$eq': ['$$transaction.id', transaction_id]
}
}
}
}}])
.next()['block']['transactions']
.pop())
except (StopIteration, IndexError):
# StopIteration is raised if the block was not found
# IndexError is returned if the block is found but no transactions
# match
return
@register_query(MongoDBConnection)
def get_transaction_from_backlog(conn, transaction_id):
return conn.run(
conn.collection('backlog')
.find_one({'id': transaction_id},
projection={'_id': False,
'assignee': False,
'assignment_timestamp': False}))
@register_query(MongoDBConnection)
def get_blocks_status_from_transaction(conn, transaction_id):
return conn.run(
conn.collection('bigchain')
.find({'block.transactions.id': transaction_id},
projection=['id', 'block.voters']))
@register_query(MongoDBConnection)
def get_txids_filtered(conn, asset_id, operation=None):
match_create = {
'block.transactions.operation': 'CREATE',
'block.transactions.id': asset_id
}
match_transfer = {
'block.transactions.operation': 'TRANSFER',
'block.transactions.asset.id': asset_id
}
if operation == Transaction.CREATE:
match = match_create
elif operation == Transaction.TRANSFER:
match = match_transfer
else:
match = {'$or': [match_create, match_transfer]}
pipeline = [
{'$match': match},
{'$unwind': '$block.transactions'},
{'$match': match},
{'$project': {'block.transactions.id': True}}
]
cursor = conn.run(
conn.collection('bigchain')
.aggregate(pipeline))
return (elem['block']['transactions']['id'] for elem in cursor)
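The `$match` / `$unwind` / `$match` shape recurs throughout this module: the first `$match` selects candidate blocks (and can use indexes), `$unwind` explodes the embedded transactions array, and the second `$match` keeps only the matching transactions. A toy in-Python emulation of the same idea:

```python
blocks = [{'id': 'b1', 'block': {'transactions': [
    {'id': 'tx1', 'operation': 'CREATE'},
    {'id': 'tx2', 'operation': 'TRANSFER', 'asset': {'id': 'tx1'}},
]}}]
asset_id = 'tx1'

def matches(tx):
    return tx['id'] == asset_id or tx.get('asset', {}).get('id') == asset_id

hits = [tx
        for b in blocks
        if any(matches(t) for t in b['block']['transactions'])  # first $match
        for tx in b['block']['transactions']                    # $unwind
        if matches(tx)]                                         # second $match
print([tx['id'] for tx in hits])  # ['tx1', 'tx2']
```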
# TODO: This doesn't seem to be used anywhere
@register_query(MongoDBConnection)
def get_asset_by_id(conn, asset_id):
cursor = conn.run(
conn.collection('bigchain')
.aggregate([
{'$match': {
'block.transactions.id': asset_id,
'block.transactions.operation': 'CREATE'
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.id': asset_id,
'block.transactions.operation': 'CREATE'
}},
{'$project': {'block.transactions.asset': True}}
]))
# we need to access some nested fields before returning so let's use a
# generator to avoid having to read all records on the cursor at this point
return (elem['block']['transactions'] for elem in cursor)
@register_query(MongoDBConnection)
def get_spent(conn, transaction_id, output):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {
'block.transactions.inputs': {
'$elemMatch': {
'fulfills.transaction_id': transaction_id,
'fulfills.output_index': output,
},
},
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.inputs': {
'$elemMatch': {
'fulfills.transaction_id': transaction_id,
'fulfills.output_index': output,
},
},
}},
]))
# we need to access some nested fields before returning so let's use a
# generator to avoid having to read all records on the cursor at this point
return (elem['block']['transactions'] for elem in cursor)
@register_query(MongoDBConnection)
def get_spending_transactions(conn, inputs):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {
'block.transactions.inputs.fulfills': {
'$in': inputs,
},
}},
{'$unwind': '$block.transactions'},
{'$match': {
'block.transactions.inputs.fulfills': {
'$in': inputs,
},
}},
]))
return ((b['id'], b['block']['transactions']) for b in cursor)
@register_query(MongoDBConnection)
def get_owned_ids(conn, owner):
cursor = conn.run(
conn.collection('bigchain').aggregate([
{'$match': {'block.transactions.outputs.public_keys': owner}},
{'$unwind': '$block.transactions'},
{'$match': {'block.transactions.outputs.public_keys': owner}}
]))
return ((b['id'], b['block']['transactions']) for b in cursor)
@register_query(MongoDBConnection)
def get_votes_by_block_id(conn, block_id):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': block_id},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_votes_for_blocks_by_voter(conn, block_ids, node_pubkey):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': {'$in': block_ids},
'node_pubkey': node_pubkey},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_votes_by_block_id_and_voter(conn, block_id, node_pubkey):
return conn.run(
conn.collection('votes')
.find({'vote.voting_for_block': block_id,
'node_pubkey': node_pubkey},
projection={'_id': False}))
@register_query(MongoDBConnection)
def write_block(conn, block_dict):
return conn.run(
conn.collection('bigchain')
.insert_one(block_dict))
@register_query(MongoDBConnection)
def get_block(conn, block_id):
return conn.run(
conn.collection('bigchain')
.find_one({'id': block_id},
projection={'_id': False}))
@register_query(MongoDBConnection)
def write_assets(conn, assets):
try:
# unordered means that all the inserts will be attempted instead of
# stopping after the first error.
return conn.run(
conn.collection('assets')
.insert_many(assets, ordered=False))
# This can happen if we try to write the same asset multiple times.
# One case is when we write the same transaction into multiple blocks due
# to invalid blocks.
# The actual mongodb exception is a BulkWriteError due to a duplicated key
# in one of the inserts.
except OperationError:
return
@register_query(MongoDBConnection)
def write_metadata(conn, metadata):
try:
return conn.run(
conn.collection('metadata')
.insert_many(metadata, ordered=False))
except OperationError:
return
@register_query(MongoDBConnection)
def get_assets(conn, asset_ids):
return conn.run(
conn.collection('assets')
.find({'id': {'$in': asset_ids}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def get_metadata(conn, txn_ids):
return conn.run(
conn.collection('metadata')
.find({'id': {'$in': txn_ids}},
projection={'_id': False}))
@register_query(MongoDBConnection)
def count_blocks(conn):
return conn.run(
conn.collection('bigchain')
.count())
@register_query(MongoDBConnection)
def count_backlog(conn):
return conn.run(
conn.collection('backlog')
.count())
@register_query(MongoDBConnection)
def write_vote(conn, vote):
conn.run(conn.collection('votes').insert_one(vote))
vote.pop('_id')
return vote
@register_query(MongoDBConnection)
def get_genesis_block(conn):
return conn.run(
conn.collection('bigchain')
.find_one(
{'block.transactions.0.operation': 'GENESIS'},
{'_id': False}
))
@register_query(MongoDBConnection)
def get_last_voted_block_id(conn, node_pubkey):
last_voted = conn.run(
conn.collection('votes')
.find({'node_pubkey': node_pubkey},
sort=[('vote.timestamp', -1)]))
# pymongo seems to return a cursor even if there are no results
# so we actually need to check the count
if last_voted.count() == 0:
return get_genesis_block(conn)['id']
mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
for v in last_voted}
last_block_id = list(mapping.values())[0]
explored = set()
while True:
try:
if last_block_id in explored:
raise CyclicBlockchainError()
explored.add(last_block_id)
last_block_id = mapping[last_block_id]
except KeyError:
break
return last_block_id
@register_query(MongoDBConnection)
def get_new_blocks_feed(conn, start_block_id):
namespace = conn.dbname + '.bigchain'
match = {'o.id': start_block_id, 'op': 'i', 'ns': namespace}
# Necessary to search in descending order since tests may write the same block id several times
query = conn.query().local.oplog.rs.find(match).sort('$natural', -1).next()['ts']
last_ts = conn.run(query)
feed = run_changefeed(conn, 'bigchain', last_ts)
return (evt['o'] for evt in feed if evt['op'] == 'i')
@register_query(MongoDBConnection)
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table='assets'):
cursor = conn.run(
conn.collection(table)
.find({'$text': {
'$search': search,
'$language': language,
'$caseSensitive': case_sensitive,
'$diacriticSensitive': diacritic_sensitive}},
{'score': {'$meta': 'textScore'}, '_id': False})
.sort([('score', {'$meta': 'textScore'})])
.limit(limit))
if text_score:
return cursor
return (_remove_text_score(obj) for obj in cursor)
def _remove_text_score(asset):
asset.pop('score', None)
return asset

View File

@@ -1,138 +0,0 @@
"""Utils to initialize and drop the database."""
import logging
from pymongo import ASCENDING, DESCENDING, TEXT
from bigchaindb import backend
from bigchaindb.common import exceptions
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.mongodb.connection import MongoDBConnection
logger = logging.getLogger(__name__)
register_schema = module_dispatch_registrar(backend.schema)
@register_schema(MongoDBConnection)
def create_database(conn, dbname):
if dbname in conn.conn.database_names():
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
.format(dbname))
logger.info('Create database `%s`.', dbname)
# TODO: read and write concerns can be declared here
conn.conn.get_database(dbname)
@register_schema(MongoDBConnection)
def create_tables(conn, dbname):
for table_name in ['bigchain', 'backlog', 'votes', 'assets', 'metadata']:
logger.info('Create `%s` table.', table_name)
# create the table
# TODO: read and write concerns can be declared here
conn.conn[dbname].create_collection(table_name)
@register_schema(MongoDBConnection)
def create_indexes(conn, dbname):
create_bigchain_secondary_index(conn, dbname)
create_backlog_secondary_index(conn, dbname)
create_votes_secondary_index(conn, dbname)
create_assets_secondary_index(conn, dbname)
create_metadata_secondary_index(conn, dbname)
@register_schema(MongoDBConnection)
def drop_database(conn, dbname):
conn.conn.drop_database(dbname)
def create_bigchain_secondary_index(conn, dbname):
logger.info('Create `bigchain` secondary index.')
# secondary index on block id which should be unique
conn.conn[dbname]['bigchain'].create_index('id',
name='block_id',
unique=True)
# to order blocks by timestamp
conn.conn[dbname]['bigchain'].create_index([('block.timestamp',
ASCENDING)],
name='block_timestamp')
# to query the bigchain for a transaction id, this field is unique
conn.conn[dbname]['bigchain'].create_index('block.transactions.id',
name='transaction_id')
# secondary index for asset uuid, this field is unique
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.asset.id', name='asset_id')
# secondary index on the public keys of outputs
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.outputs.public_keys',
name='outputs')
# secondary index on inputs/transaction links (transaction_id, output)
conn.conn[dbname]['bigchain']\
.create_index([
('block.transactions.inputs.fulfills.transaction_id', ASCENDING),
('block.transactions.inputs.fulfills.output_index', ASCENDING),
], name='inputs')
def create_backlog_secondary_index(conn, dbname):
logger.info('Create `backlog` secondary index.')
# secondary index on the transaction id with a uniqueness constraint
# to make sure there are no duplicated transactions in the backlog
conn.conn[dbname]['backlog'].create_index('id',
name='transaction_id',
unique=True)
# compound index to read transactions from the backlog per assignee
conn.conn[dbname]['backlog']\
.create_index([('assignee', ASCENDING),
('assignment_timestamp', DESCENDING)],
name='assignee__transaction_timestamp')
def create_votes_secondary_index(conn, dbname):
logger.info('Create `votes` secondary index.')
# is the first index redundant then?
# compound index to order votes by block id and node
conn.conn[dbname]['votes'].create_index([('vote.voting_for_block',
ASCENDING),
('node_pubkey',
ASCENDING)],
name='block_and_voter',
unique=True)
def create_assets_secondary_index(conn, dbname):
logger.info('Create `assets` secondary index.')
# unique index on the id of the asset.
# the id is the txid of the transaction that created the asset
conn.conn[dbname]['assets'].create_index('id',
name='asset_id',
unique=True)
# full text search index
conn.conn[dbname]['assets'].create_index([('$**', TEXT)], name='text')
def create_metadata_secondary_index(conn, dbname):
logger.info('Create `metadata` secondary index.')
# unique index on the id of the metadata.
# the id is the txid of the transaction for which the metadata
# was specified
conn.conn[dbname]['metadata'].create_index('id',
name='transaction_id',
unique=True)
# full text search index
conn.conn[dbname]['metadata'].create_index([('$**', TEXT)], name='text')
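The `$**` wildcard text indexes created above are what make the `$text` queries in `text_search` work; a small pymongo sketch against a throwaway database:

```python
import pymongo

client = pymongo.MongoClient()
coll = client['text_index_demo']['assets']  # hypothetical scratch database
coll.create_index([('$**', pymongo.TEXT)], name='text')
coll.insert_one({'id': 'tx1', 'data': {'description': 'a blue bicycle'}})
for doc in coll.find({'$text': {'$search': 'bicycle'}}, {'_id': False}):
    print(doc)  # {'id': 'tx1', 'data': {'description': 'a blue bicycle'}}
```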

View File

@@ -9,7 +9,7 @@ PRE_COMMIT_ID = 'a_unique_id_string'
@singledispatch
def write_transaction(connection, signed_transaction):
def store_transaction(connection, signed_transaction):
"""Write a transaction to the backlog table.
Args:
@@ -64,16 +64,9 @@ def store_metadatas(connection, metadata):
raise NotImplementedError
@singledispatch
def store_transaction(connection, signed_transaction):
"""Same as write_transaction."""
raise NotImplementedError
@singledispatch
def store_transactions(connection, signed_transactions):
"""Store list of transactions."""
"""Store the list of transactions."""
raise NotImplementedError
@@ -120,110 +113,6 @@ def get_asset(connection, asset_id):
raise NotImplementedError
@singledispatch
def update_transaction(connection, transaction_id, doc):
"""Update a transaction in the backlog table.
Args:
transaction_id (str): the id of the transaction.
doc (dict): the values to update.
Returns:
The result of the operation.
"""
raise NotImplementedError
@singledispatch
def delete_transaction(connection, *transaction_id):
"""Delete a transaction from the backlog.
Args:
*transaction_id (str): the transaction(s) to delete.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_stale_transactions(connection, reassign_delay):
"""Get a cursor of stale transactions.
Transactions are considered stale if they have been assigned a node,
but are still in the backlog after some amount of time specified in the
configuration.
Args:
reassign_delay (int): threshold (in seconds) to mark a transaction stale.
Returns:
A cursor of transactions.
"""
raise NotImplementedError
@singledispatch
def get_transaction_from_block(connection, transaction_id, block_id):
"""Get a transaction from a specific block.
Args:
transaction_id (str): the id of the transaction.
block_id (str): the id of the block.
Returns:
The matching transaction.
"""
raise NotImplementedError
@singledispatch
def get_transaction_from_backlog(connection, transaction_id):
"""Get a transaction from backlog.
Args:
transaction_id (str): the id of the transaction.
Returns:
The matching transaction.
"""
raise NotImplementedError
@singledispatch
def get_blocks_status_from_transaction(connection, transaction_id):
"""Retrieve block election information given a secondary index and value.
Args:
value: a value to search (e.g. transaction id string, payload hash string)
index (str): name of a secondary index, e.g. 'transaction_id'
Returns:
:obj:`list` of :obj:`dict`: A list of blocks with only election information
"""
raise NotImplementedError
@singledispatch
def get_asset_by_id(connection, asset_id):
"""Returns the asset associated with an asset_id.
Args:
asset_id (str): The asset id.
Returns:
Returns a rethinkdb cursor.
"""
raise NotImplementedError
@singledispatch
def get_spent(connection, transaction_id, condition_id):
"""Check if a `txid` was already used as an input.
@@ -271,63 +160,6 @@ def get_owned_ids(connection, owner):
raise NotImplementedError
@singledispatch
def get_votes_by_block_id(connection, block_id):
"""Get all the votes casted for a specific block.
Args:
block_id (str): the block id to use.
Returns:
A cursor for the matching votes.
"""
raise NotImplementedError
@singledispatch
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
"""Get all the votes casted for a specific block by a specific voter.
Args:
block_id (str): the block id to use.
node_pubkey (str): base58 encoded public key
Returns:
A cursor for the matching votes.
"""
raise NotImplementedError
@singledispatch
def get_votes_for_blocks_by_voter(connection, block_ids, pubkey):
"""Return votes for many block_ids
Args:
block_ids (set): block_ids
pubkey (str): public key of voting node
Returns:
A cursor of votes matching given block_ids and public key
"""
raise NotImplementedError
@singledispatch
def write_block(connection, block):
"""Write a block to the bigchain table.
Args:
block (dict): the block to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_block(connection, block_id):
"""Get a block from the bigchain table.
@@ -356,46 +188,6 @@ def get_block_with_transaction(connection, txid):
raise NotImplementedError
@singledispatch
def write_assets(connection, assets):
"""Write a list of assets to the assets table.
Args:
assets (list): a list of assets to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def write_metadata(connection, metadata):
"""Write a list of metadata to the metadata table.
Args:
metadata (list): a list of metadata to write.
Returns:
The database response.
"""
raise NotImplementedError
@singledispatch
def get_assets(connection, asset_ids):
"""Get a list of assets from the assets table.
Args:
asset_ids (list): a list of ids for the assets to be retrieved from
the database.
Returns:
assets (list): the list of returned assets.
"""
raise NotImplementedError
@singledispatch
def get_metadata(connection, transaction_ids):
"""Get a list of metadata from the metadata table.
@@ -411,64 +203,14 @@ def get_metadata(connection, transaction_ids):
@singledispatch
def count_blocks(connection):
"""Count the number of blocks in the bigchain table.
Returns:
The number of blocks.
"""
raise NotImplementedError
@singledispatch
def count_backlog(connection):
"""Count the number of transactions in the backlog table.
Returns:
The number of transactions in the backlog.
"""
raise NotImplementedError
@singledispatch
def write_vote(connection, vote):
"""Write a vote to the votes table.
def get_assets(connection, asset_ids):
"""Get a list of assets from the assets table.
Args:
vote (dict): the vote to write.
asset_ids (list): a list of ids for the assets to be retrieved from
the database.
Returns:
The database response.
assets (list): the list of returned assets.
"""
raise NotImplementedError
@singledispatch
def get_genesis_block(connection):
"""Get the genesis block.
Returns:
The genesis block
"""
raise NotImplementedError
@singledispatch
def get_last_voted_block_id(connection, node_pubkey):
"""Get the last voted block for a specific node.
Args:
node_pubkey (str): base58 encoded public key.
Returns:
The id of the last block the node has voted on. If the node didn't cast
any vote then the genesis block id is returned.
"""
raise NotImplementedError
@@ -484,20 +226,6 @@ def get_txids_filtered(connection, asset_id, operation=None):
raise NotImplementedError
@singledispatch
def get_new_blocks_feed(connection, start_block_id):
"""Return a generator that yields change events of the blocks feed
Args:
start_block_id (str): ID of block to resume from
Returns:
Generator of change events
"""
raise NotImplementedError
@singledispatch
def text_search(conn, search, *, language='english', case_sensitive=False,
diacritic_sensitive=False, text_score=False, limit=0, table=None):
@@ -533,7 +261,7 @@ def text_search(conn, search, *, language='english', case_sensitive=False,
@singledispatch
def get_latest_block(conn):
"""Get the latest commited block i.e. block with largest height """
"""Get the latest commited block i.e. block with largest height"""
raise NotImplementedError
@@ -614,7 +342,7 @@ def store_pre_commit_state(connection, commit_id, state):
@singledispatch
def store_validator_update(conn, validator_update):
"""Store a update for the validator set """
"""Store a update for the validator set"""
raise NotImplementedError

View File

@@ -1,22 +0,0 @@
"""RethinkDB backend implementation.
Contains a RethinkDB-specific implementation of the
:mod:`~bigchaindb.backend.changefeed`, :mod:`~bigchaindb.backend.query`, and
:mod:`~bigchaindb.backend.schema` interfaces.
You can specify BigchainDB to use RethinkDB as its database backend by either
setting ``database.backend`` to ``'rethinkdb'`` in your configuration file, or
setting the ``BIGCHAINDB_DATABASE_BACKEND`` environment variable to
``'rethinkdb'``.
If configured to use RethinkDB, BigchainDB will automatically return instances
of :class:`~bigchaindb.backend.rethinkdb.RethinkDBConnection` for
:func:`~bigchaindb.backend.connection.connect` and dispatch calls of the
generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
from bigchaindb.backend.rethinkdb import admin, changefeed, schema, query # noqa
# RethinkDBConnection should always be accessed via
# ``bigchaindb.backend.connect()``.

View File

@@ -1,165 +0,0 @@
"""Database configuration functions."""
import logging
import rethinkdb as r
from bigchaindb.backend import admin
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_admin = module_dispatch_registrar(admin)
@register_admin(RethinkDBConnection)
def get_config(connection, *, table):
"""Get the configuration of the given table.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): The name of the table to get the configuration for.
Returns:
dict: The configuration of the given table
"""
return connection.run(r.table(table).config())
@register_admin(RethinkDBConnection)
def reconfigure(connection, *, table, shards, replicas,
primary_replica_tag=None, dry_run=False,
nonvoting_replica_tags=None):
"""Reconfigures the given table.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
table (str): The name of the table to reconfigure.
shards (int): The number of shards, an integer from 1-64.
replicas (:obj:`int` | :obj:`dict`):
* If replicas is an integer, it specifies the number of
replicas per shard. Specifying more replicas than there
are servers will return an error.
* If replicas is a dictionary, it specifies key-value pairs
of server tags and the number of replicas to assign to
those servers::
{'africa': 2, 'asia': 4, 'europe': 2, ...}
primary_replica_tag (str): The primary server specified by its
server tag. Required if ``replicas`` is a dictionary. The
tag must be in the ``replicas`` dictionary. This must not be
specified if ``replicas`` is an integer. Defaults to
``None``.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
nonvoting_replica_tags (:obj:`list` of :obj:`str`): Replicas
with these server tags will be added to the
``nonvoting_replicas`` list of the resulting configuration.
Defaults to ``None``.
Returns:
dict: A dictionary with possibly three keys:
* ``reconfigured``: the number of tables reconfigured. This
will be ``0`` if ``dry_run`` is ``True``.
* ``config_changes``: a list of new and old table
configuration values.
* ``status_changes``: a list of new and old table status
values.
For more information please consult RethinkDB's
documentation `ReQL command: reconfigure
<https://rethinkdb.com/api/python/reconfigure/>`_.
Raises:
OperationError: If the reconfiguration fails due to a
RethinkDB :exc:`ReqlOpFailedError` or
:exc:`ReqlQueryLogicError`.
"""
params = {
'shards': shards,
'replicas': replicas,
'dry_run': dry_run,
}
if primary_replica_tag:
params.update(
primary_replica_tag=primary_replica_tag,
nonvoting_replica_tags=nonvoting_replica_tags,
)
try:
return connection.run(r.table(table).reconfigure(**params))
except (r.ReqlOpFailedError, r.ReqlQueryLogicError) as e:
raise OperationError('Failed to reconfigure tables.') from e
@register_admin(RethinkDBConnection)
def set_shards(connection, *, shards, dry_run=False):
"""Sets the shards for the tables
:const:`~bigchaindb.backend.schema.TABLES`.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
shards (int): The number of shards, an integer from 1-64.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
Returns:
dict: A dictionary with the configuration and status changes.
For more details please see :func:`.reconfigure`.
"""
changes = {}
for table in TABLES:
replicas = len(
get_config(connection, table=table)['shards'][0]['replicas'])
change = reconfigure(
connection,
table=table,
shards=shards,
replicas=replicas,
dry_run=dry_run,
)
changes[table] = change
return changes
@register_admin(RethinkDBConnection)
def set_replicas(connection, *, replicas, dry_run=False):
"""Sets the replicas for the tables
:const:`~bigchaindb.backend.schema.TABLES`.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
replicas (int): The number of replicas per shard. Specifying
more replicas than there are servers will return an error.
dry_run (bool): If ``True`` the generated configuration will not
be applied to the table, only returned. Defaults to
``False``.
Returns:
dict: A dictionary with the configuration and status changes.
For more details please see :func:`.reconfigure`.
"""
changes = {}
for table in TABLES:
shards = len(get_config(connection, table=table)['shards'])
change = reconfigure(
connection,
table=table,
shards=shards,
replicas=replicas,
dry_run=dry_run,
)
changes[table] = change
return changes

View File

@ -1,59 +0,0 @@
import time
import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.backend.exceptions import BackendError
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_changefeed = module_dispatch_registrar(backend.changefeed)
class RethinkDBChangeFeed(ChangeFeed):
"""This class wraps a RethinkDB changefeed as a multipipes Node."""
def run_forever(self):
for element in self.prefeed:
self.outqueue.put(element)
for change in run_changefeed(self.connection, self.table):
is_insert = change['old_val'] is None
is_delete = change['new_val'] is None
is_update = not is_insert and not is_delete
if is_insert and (self.operation & ChangeFeed.INSERT):
self.outqueue.put(change['new_val'])
elif is_delete and (self.operation & ChangeFeed.DELETE):
self.outqueue.put(change['old_val'])
elif is_update and (self.operation & ChangeFeed.UPDATE):
self.outqueue.put(change['new_val'])
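Note that `self.operation` acts as a bit mask, so a consumer can subscribe to several event types with a single feed. The changefeed tests further down use exactly this idiom (`conn` assumed to be an existing connection):

```python
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed

# Listen for both inserts and updates on the backlog table.
feed = get_changefeed(conn, 'backlog', ChangeFeed.INSERT | ChangeFeed.UPDATE)
```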
def run_changefeed(connection, table):
"""Encapsulate operational logic of tailing changefeed from RethinkDB
"""
while True:
try:
for change in connection.run(r.table(table).changes()):
yield change
break
except (BackendError, r.ReqlDriverError) as exc:
logger.exception('Error connecting to the database, retrying')
time.sleep(1)
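`run_changefeed` hides the reconnect loop from its consumers: on a driver or backend error it logs, sleeps for a second, and reopens the cursor. Consumers just iterate, as `get_new_blocks_feed` in the query module below does:

```python
# Tail the 'bigchain' table indefinitely, surviving transient errors.
for change in run_changefeed(connection, 'bigchain'):
    print(change['new_val'])
```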
@register_changefeed(RethinkDBConnection)
def get_changefeed(connection, table, operation, *, prefeed=None):
"""Return a RethinkDB changefeed.
Returns:
An instance of
:class:`~bigchaindb.backend.rethinkdb.RethinkDBChangeFeed`.
"""
return RethinkDBChangeFeed(table, operation, prefeed=prefeed,
connection=connection)

View File

@ -1,46 +0,0 @@
import rethinkdb as r
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.exceptions import ConnectionError, OperationError
class RethinkDBConnection(Connection):
"""This class is a proxy to run queries against the database, it is:
- lazy, since it creates a connection only when needed
- resilient, because before raising exceptions it tries
more times to run the query or open a connection.
"""
def run(self, query):
"""Run a RethinkDB query.
Args:
query: the RethinkDB query.
Raises:
:exc:`rethinkdb.ReqlDriverError`: After
:attr:`~.RethinkDBConnection.max_tries`.
"""
try:
return query.run(self.conn)
except r.ReqlDriverError as exc:
raise OperationError from exc
def _connect(self):
"""Set a connection to RethinkDB.
The connection is available via :attr:`~.RethinkDBConnection.conn`.
Raises:
:exc:`rethinkdb.ReqlDriverError`: After
:attr:`~.RethinkDBConnection.max_tries`.
"""
try:
return r.connect(host=self.host,
port=self.port,
db=self.dbname,
timeout=self.connection_timeout)
except (r.ReqlDriverError, r.ReqlTimeoutError) as exc:
raise ConnectionError from exc

View File

@ -1,312 +0,0 @@
from itertools import chain
import logging as logger
from time import time
import rethinkdb as r
from bigchaindb import backend, utils
from bigchaindb.backend.rethinkdb import changefeed
from bigchaindb.common import exceptions
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.utils import serialize
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logger.getLogger(__name__)
READ_MODE = 'majority'
WRITE_DURABILITY = 'hard'
register_query = module_dispatch_registrar(backend.query)
@register_query(RethinkDBConnection)
def write_transaction(connection, signed_transaction):
return connection.run(
r.table('backlog')
.insert(signed_transaction, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def update_transaction(connection, transaction_id, doc):
return connection.run(
r.table('backlog')
.get(transaction_id)
.update(doc))
@register_query(RethinkDBConnection)
def delete_transaction(connection, *transaction_id):
return connection.run(
r.table('backlog')
.get_all(*transaction_id)
.delete(durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_stale_transactions(connection, reassign_delay):
return connection.run(
r.table('backlog')
.filter(lambda tx: time() - tx['assignment_timestamp'] > reassign_delay))
@register_query(RethinkDBConnection)
def get_transaction_from_block(connection, transaction_id, block_id):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get(block_id)
.get_field('block')
.get_field('transactions')
.filter(lambda tx: tx['id'] == transaction_id))[0]
@register_query(RethinkDBConnection)
def get_transaction_from_backlog(connection, transaction_id):
return connection.run(
r.table('backlog')
.get(transaction_id)
.without('assignee', 'assignment_timestamp')
.default(None))
@register_query(RethinkDBConnection)
def get_blocks_status_from_transaction(connection, transaction_id):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get_all(transaction_id, index='transaction_id')
.pluck('votes', 'id', {'block': ['voters']}))
@register_query(RethinkDBConnection)
def get_txids_filtered(connection, asset_id, operation=None):
# here we only want to return the transaction ids, since later on
# we are going to retrieve each transaction along with its validation status
parts = []
if operation in (Transaction.CREATE, None):
# First find the asset's CREATE transaction
parts.append(connection.run(
_get_asset_create_tx_query(asset_id).get_field('id')))
if operation in (Transaction.TRANSFER, None):
# Then find any TRANSFER transactions related to the asset
parts.append(connection.run(
r.table('bigchain')
.get_all(asset_id, index='asset_id')
.concat_map(lambda block: block['block']['transactions'])
.filter(lambda transaction: transaction['asset']['id'] == asset_id)
.get_field('id')))
return chain(*parts)
@register_query(RethinkDBConnection)
def get_asset_by_id(connection, asset_id):
return connection.run(_get_asset_create_tx_query(asset_id).pluck('asset'))
def _get_asset_create_tx_query(asset_id):
return r.table('bigchain', read_mode=READ_MODE) \
.get_all(asset_id, index='transaction_id') \
.concat_map(lambda block: block['block']['transactions']) \
.filter(lambda transaction: transaction['id'] == asset_id)
@register_query(RethinkDBConnection)
def get_spent(connection, transaction_id, output):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.get_all([transaction_id, output], index='inputs')
.concat_map(lambda doc: doc['block']['transactions'])
.filter(lambda transaction: transaction['inputs'].contains(
lambda input_: input_['fulfills'] == {
'transaction_id': transaction_id, 'output_index': output})))
@register_query(RethinkDBConnection)
def get_owned_ids(connection, owner):
query = (r.table('bigchain', read_mode=READ_MODE)
.get_all(owner, index='outputs')
.distinct()
.concat_map(unwind_block_transactions)
.filter(lambda doc: doc['tx']['outputs'].contains(
lambda c: c['public_keys'].contains(owner))))
cursor = connection.run(query)
return ((b['id'], b['tx']) for b in cursor)
@register_query(RethinkDBConnection)
def get_votes_by_block_id(connection, block_id):
return connection.run(
r.table('votes', read_mode=READ_MODE)
.between([block_id, r.minval], [block_id, r.maxval], index='block_and_voter')
.without('id'))
@register_query(RethinkDBConnection)
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
return connection.run(
r.table('votes')
.get_all([block_id, node_pubkey], index='block_and_voter')
.without('id'))
@register_query(RethinkDBConnection)
def write_block(connection, block_dict):
return connection.run(
r.table('bigchain')
.insert(r.json(serialize(block_dict)), durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_block(connection, block_id):
return connection.run(r.table('bigchain').get(block_id))
@register_query(RethinkDBConnection)
def write_assets(connection, assets):
return connection.run(
r.table('assets')
.insert(assets, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def write_metadata(connection, metadata):
return connection.run(
r.table('metadata')
.insert(metadata, durability=WRITE_DURABILITY))
@register_query(RethinkDBConnection)
def get_assets(connection, asset_ids):
return connection.run(
r.table('assets', read_mode=READ_MODE)
.get_all(*asset_ids))
@register_query(RethinkDBConnection)
def get_metadata(connection, txn_ids):
return connection.run(
r.table('metadata', read_mode=READ_MODE)
.get_all(*txn_ids))
@register_query(RethinkDBConnection)
def count_blocks(connection):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.count())
@register_query(RethinkDBConnection)
def count_backlog(connection):
return connection.run(
r.table('backlog', read_mode=READ_MODE)
.count())
@register_query(RethinkDBConnection)
def write_vote(connection, vote):
return connection.run(
r.table('votes')
.insert(vote))
@register_query(RethinkDBConnection)
def get_genesis_block(connection):
return connection.run(
r.table('bigchain', read_mode=READ_MODE)
.filter(utils.is_genesis_block)
.nth(0))
@register_query(RethinkDBConnection)
def get_last_voted_block_id(connection, node_pubkey):
try:
# get the latest value for the vote timestamp (over all votes)
max_timestamp = connection.run(
r.table('votes', read_mode=READ_MODE)
.filter(r.row['node_pubkey'] == node_pubkey)
.max(r.row['vote']['timestamp']))['vote']['timestamp']
last_voted = list(connection.run(
r.table('votes', read_mode=READ_MODE)
.filter(r.row['vote']['timestamp'] == max_timestamp)
.filter(r.row['node_pubkey'] == node_pubkey)))
except r.ReqlNonExistenceError:
# no vote was cast by this node yet, so fall back to the genesis block
return get_genesis_block(connection)['id']
# Now the fun starts. Since the timestamp resolution is one second,
# we might have more than one vote with the same timestamp. If this is
# the case, we need to rebuild the chain for the blocks that have been
# retrieved to find the last one.
# Given a block_id, mapping returns the id of the block pointing at it.
mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
for v in last_voted}
# Since we follow the chain backwards, we can start from a random
# point of the chain and "move up" from it.
last_block_id = list(mapping.values())[0]
# We must be sure to break the infinite loop. This happens when:
# - the block we are currently iterating is the one we are looking for.
# This will trigger a KeyError, breaking the loop
# - we are visiting again a node we already explored, hence there is
# a loop. This might happen if a vote points both `previous_block`
# and `voting_for_block` to the same `block_id`
explored = set()
while True:
try:
if last_block_id in explored:
raise exceptions.CyclicBlockchainError()
explored.add(last_block_id)
last_block_id = mapping[last_block_id]
except KeyError:
break
return last_block_id
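The chain walk above is easier to see on a toy input. Suppose two votes share the newest timestamp; `mapping` then links each block to its successor, and the loop follows the links until a `KeyError` marks the tip (all ids here are made up):

```python
# previous_block -> voting_for_block, i.e. look up a block's successor
mapping = {'block-a': 'block-b', 'block-b': 'block-c'}

last_block_id = 'block-b'  # arbitrary starting point in the chain
explored = set()
while True:
    try:
        if last_block_id in explored:
            raise RuntimeError('cyclic blockchain')
        explored.add(last_block_id)
        last_block_id = mapping[last_block_id]
    except KeyError:
        break

assert last_block_id == 'block-c'  # the most recently voted block
```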
@register_query(RethinkDBConnection)
def get_new_blocks_feed(connection, start_block_id): # pragma: no cover
logger.warning('RethinkDB changefeed unable to resume from given block: %s',
start_block_id)
# In order to get blocks in the correct order, it may be acceptable to
# look in the votes table to see what order other nodes have used.
for change in changefeed.run_changefeed(connection, 'bigchain'):
yield change['new_val']
@register_query(RethinkDBConnection)
def get_votes_for_blocks_by_voter(connection, block_ids, node_pubkey):
return connection.run(
r.table('votes')
.filter(lambda row: r.expr(block_ids).contains(row['vote']['voting_for_block']))
.filter(lambda row: row['node_pubkey'] == node_pubkey))
def unwind_block_transactions(block):
"""Yield a block for each transaction in given block"""
return block['block']['transactions'].map(lambda tx: block.merge({'tx': tx}))
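`unwind_block_transactions` denormalizes a block document so that downstream filters can inspect one transaction at a time while still seeing the enclosing block's fields. A plain-Python rendering of the ReQL expression (document shape assumed):

```python
def unwind_block_transactions_py(block):
    # One merged document per transaction in the given block.
    return [dict(block, tx=tx) for tx in block['block']['transactions']]
```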
@register_query(RethinkDBConnection)
def get_spending_transactions(connection, links):
query = (
r.table('bigchain')
.get_all(*[(l['transaction_id'], l['output_index']) for l in links],
index='inputs')
.concat_map(unwind_block_transactions)
# filter for transactions spending one of the given outputs
.filter(lambda doc: r.expr(links).set_intersection(
doc['tx']['inputs'].map(lambda i: i['fulfills'])))
)
cursor = connection.run(query)
return ((b['id'], b['tx']) for b in cursor)

View File

@ -1,130 +0,0 @@
import logging
import rethinkdb as r
from bigchaindb import backend
from bigchaindb.common import exceptions
from bigchaindb.backend.utils import module_dispatch_registrar
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
logger = logging.getLogger(__name__)
register_schema = module_dispatch_registrar(backend.schema)
@register_schema(RethinkDBConnection)
def create_database(connection, dbname):
if connection.run(r.db_list().contains(dbname)):
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'.format(dbname))
logger.info('Create database `%s`.', dbname)
connection.run(r.db_create(dbname))
@register_schema(RethinkDBConnection)
def create_tables(connection, dbname):
for table_name in ['bigchain', 'backlog', 'votes', 'assets', 'metadata']:
logger.info('Create `%s` table.', table_name)
connection.run(r.db(dbname).table_create(table_name))
@register_schema(RethinkDBConnection)
def create_indexes(connection, dbname):
create_bigchain_secondary_index(connection, dbname)
create_backlog_secondary_index(connection, dbname)
create_votes_secondary_index(connection, dbname)
@register_schema(RethinkDBConnection)
def drop_database(connection, dbname):
try:
logger.info('Drop database `%s`', dbname)
connection.run(r.db_drop(dbname))
logger.info('Done.')
except r.ReqlOpFailedError:
raise exceptions.DatabaseDoesNotExist('Database `{}` does not exist'.format(dbname))
def create_bigchain_secondary_index(connection, dbname):
logger.info('Create `bigchain` secondary index.')
# to order blocks by timestamp
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('block_timestamp', r.row['block']['timestamp']))
# to query the bigchain for a transaction id
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('transaction_id', r.row['block']['transactions']['id'], multi=True))
# secondary index for asset links (in TRANSFER transactions)
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('asset_id', r.row['block']['transactions']['asset']['id'], multi=True))
# secondary index on the public keys of outputs
# the last reduce operation returns a flattened list of public_keys;
# without it we would need to match the public_keys list exactly.
# For instance, querying for `pk1` would not match documents with
# `public_keys: [pk1, pk2, pk3]`
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('outputs',
r.row['block']['transactions']
.concat_map(lambda tx: tx['outputs']['public_keys'])
.reduce(lambda l, r: l + r), multi=True))
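The flattening matters because every transaction contributes its own list of keys and the multi-index needs one entry per key. A toy illustration of what the `reduce` does, in plain Python (values made up):

```python
from functools import reduce

keys_per_tx = [['pk1'], ['pk2', 'pk3']]
flat = reduce(lambda l, r: l + r, keys_per_tx)
assert flat == ['pk1', 'pk2', 'pk3']
```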
# secondary index on inputs/transaction links (transaction_id, output)
connection.run(
r.db(dbname)
.table('bigchain')
.index_create('inputs',
r.row['block']['transactions']
.concat_map(lambda tx: tx['inputs']['fulfills'])
.with_fields('transaction_id', 'output_index')
.map(lambda fulfills: [fulfills['transaction_id'],
fulfills['output_index']]),
multi=True))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('bigchain')
.index_wait())
def create_backlog_secondary_index(connection, dbname):
logger.info('Create `backlog` secondary index.')
# compound index to read transactions from the backlog per assignee
connection.run(
r.db(dbname)
.table('backlog')
.index_create('assignee__transaction_timestamp', [r.row['assignee'], r.row['assignment_timestamp']]))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('backlog')
.index_wait())
def create_votes_secondary_index(connection, dbname):
logger.info('Create `votes` secondary index.')
# compound index to order votes by block id and node
connection.run(
r.db(dbname)
.table('votes')
.index_create('block_and_voter', [r.row['vote']['voting_for_block'], r.row['node_pubkey']]))
# wait for rethinkdb to finish creating secondary indexes
connection.run(
r.db(dbname)
.table('votes')
.index_wait())

View File

@ -107,7 +107,7 @@ $ pip install -e .[dev] # or pip install -e '.[dev]' # for zsh
To execute tests when developing a feature or fixing a bug, use the following command:
```bash
$ pytest -v --database-backend=localmongodb
$ pytest -v
```
NOTE: MongoDB and Tendermint should be running as discussed above.
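pytest's standard selection flags still apply; for instance, to run only the tests whose names match a keyword (the expression below is just an example):

```bash
$ pytest -v -k text_search
```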

View File

@ -15,10 +15,6 @@ Generic Interfaces
.. automodule:: bigchaindb.backend.connection
:special-members: __init__
:mod:`bigchaindb.backend.changefeed`
------------------------------------
.. automodule:: bigchaindb.backend.changefeed
:mod:`bigchaindb.backend.query`
-------------------------------
.. automodule:: bigchaindb.backend.query
@ -27,43 +23,25 @@ Generic Interfaces
--------------------------------
.. automodule:: bigchaindb.backend.schema
:mod:`bigchaindb.backend.admin`
-------------------------------
.. automodule:: bigchaindb.backend.admin
:mod:`bigchaindb.backend.utils`
-------------------------------
.. automodule:: bigchaindb.backend.utils
RethinkDB Backend
=================
.. automodule:: bigchaindb.backend.rethinkdb
:mod:`bigchaindb.backend.rethinkdb.connection`
----------------------------------------------
.. automodule:: bigchaindb.backend.rethinkdb.connection
:special-members: __init__
:mod:`bigchaindb.backend.rethinkdb.schema`
------------------------------------------
.. automodule:: bigchaindb.backend.rethinkdb.schema
:mod:`bigchaindb.backend.rethinkdb.query`
-----------------------------------------
.. automodule:: bigchaindb.backend.rethinkdb.query
:mod:`bigchaindb.backend.rethinkdb.changefeed`
----------------------------------------------
.. automodule:: bigchaindb.backend.rethinkdb.changefeed
:mod:`bigchaindb.backend.rethinkdb.admin`
-----------------------------------------
.. automodule:: bigchaindb.backend.rethinkdb.admin
MongoDB Backend
===============
Stay tuned!
.. automodule:: bigchaindb.backend.localmongodb
:special-members: __init__
:mod:`bigchaindb.backend.localmongodb.connection`
-------------------------------------------------
.. automodule:: bigchaindb.backend.localmongodb.connection
:mod:`bigchaindb.backend.localmongodb.query`
--------------------------------------------
.. automodule:: bigchaindb.backend.localmongodb.query
:mod:`bigchaindb.backend.localmongodb.schema`
---------------------------------------------
.. automodule:: bigchaindb.backend.localmongodb.schema

View File

@ -72,7 +72,6 @@ benchmarks_require = [
install_requires = [
# TODO Consider not installing the db drivers, or putting them in extras.
'rethinkdb~=2.3', # i.e. a version between 2.3 and 3.0
'pymongo~=3.6',
'pysha3~=1.0.2',
'cryptoconditions~=0.6.0.dev',
@ -83,7 +82,6 @@ install_requires = [
'flask-restful~=0.3.0',
'requests~=2.9',
'gunicorn~=19.0',
'multipipes~=0.1.0',
'jsonschema~=2.5.1',
'pyyaml~=3.12',
'aiohttp~=2.3',

View File

@ -6,22 +6,22 @@ from pymongo import MongoClient
from pymongo.database import Database
pytestmark = pytest.mark.bdb
pytestmark = [pytest.mark.bdb, pytest.mark.tendermint]
@pytest.fixture
def mock_cmd_line_opts():
return {'argv': ['mongod', '--dbpath=/data', '--replSet=bigchain-rs'],
return {'argv': ['mongod', '--dbpath=/data'],
'ok': 1.0,
'parsed': {'replication': {'replSet': 'bigchain-rs'},
'parsed': {'replication': {'replSet': None},
'storage': {'dbPath': '/data'}}}
@pytest.fixture
def mock_config_opts():
return {'argv': ['mongod', '--dbpath=/data', '--replSet=bigchain-rs'],
return {'argv': ['mongod', '--dbpath=/data'],
'ok': 1.0,
'parsed': {'replication': {'replSetName': 'bigchain-rs'},
'parsed': {'replication': {'replSetName': None},
'storage': {'dbPath': '/data'}}}
@ -35,23 +35,23 @@ def mongodb_connection():
def test_get_connection_returns_the_correct_instance(db_host, db_port):
from bigchaindb.backend import connect
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
config = {
'backend': 'mongodb',
'backend': 'localmongodb',
'host': db_host,
'port': db_port,
'name': 'test',
'replicaset': 'bigchain-rs'
'replicaset': None,
}
conn = connect(**config)
assert isinstance(conn, Connection)
assert isinstance(conn, MongoDBConnection)
assert isinstance(conn, LocalMongoDBConnection)
assert conn.conn._topology_settings.replica_set_name == config['replicaset']
@mock.patch('bigchaindb.backend.mongodb.connection.initialize_replica_set')
@mock.patch('bigchaindb.backend.localmongodb.connection.initialize_replica_set')
@mock.patch('pymongo.MongoClient.__init__')
@mock.patch('time.sleep')
def test_connection_error(mock_sleep, mock_client, mock_init_repl_set):
@ -70,7 +70,7 @@ def test_connection_error(mock_sleep, mock_client, mock_init_repl_set):
assert mock_client.call_count == 3
@mock.patch('bigchaindb.backend.mongodb.connection.initialize_replica_set')
@mock.patch('bigchaindb.backend.localmongodb.connection.initialize_replica_set')
@mock.patch('pymongo.MongoClient')
def test_connection_run_errors(mock_client, mock_init_repl_set):
from bigchaindb.backend import connect
@ -102,17 +102,17 @@ def test_connection_run_errors(mock_client, mock_init_repl_set):
@mock.patch('pymongo.database.Database.authenticate')
def test_connection_with_credentials(mock_authenticate):
import bigchaindb
from bigchaindb.backend.mongodb.connection import MongoDBConnection
conn = MongoDBConnection(host=bigchaindb.config['database']['host'],
port=bigchaindb.config['database']['port'],
login='theplague',
password='secret')
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
conn = LocalMongoDBConnection(host=bigchaindb.config['database']['host'],
port=bigchaindb.config['database']['port'],
login='theplague',
password='secret')
conn.connect()
assert mock_authenticate.call_count == 2
assert mock_authenticate.call_count == 1
def test_check_replica_set_not_enabled(mongodb_connection):
from bigchaindb.backend.mongodb.connection import _check_replica_set
from bigchaindb.backend.localmongodb.connection import _check_replica_set
from bigchaindb.common.exceptions import ConfigurationError
# no replSet option set
@ -126,7 +126,7 @@ def test_check_replica_set_not_enabled(mongodb_connection):
def test_check_replica_set_command_line(mongodb_connection,
mock_cmd_line_opts):
from bigchaindb.backend.mongodb.connection import _check_replica_set
from bigchaindb.backend.localmongodb.connection import _check_replica_set
# replSet option set through the command line
with mock.patch.object(Database, 'command',
@ -135,7 +135,7 @@ def test_check_replica_set_command_line(mongodb_connection,
def test_check_replica_set_config_file(mongodb_connection, mock_config_opts):
from bigchaindb.backend.mongodb.connection import _check_replica_set
from bigchaindb.backend.localmongodb.connection import _check_replica_set
# replSet option set through the config file
with mock.patch.object(Database, 'command', return_value=mock_config_opts):
@ -144,7 +144,7 @@ def test_check_replica_set_config_file(mongodb_connection, mock_config_opts):
def test_check_replica_set_name_mismatch(mongodb_connection,
mock_cmd_line_opts):
from bigchaindb.backend.mongodb.connection import _check_replica_set
from bigchaindb.backend.localmongodb.connection import _check_replica_set
from bigchaindb.common.exceptions import ConfigurationError
# change the replica set name so it does not match the bigchaindb config
@ -157,7 +157,7 @@ def test_check_replica_set_name_mismatch(mongodb_connection,
def test_wait_for_replica_set_initialization(mongodb_connection):
from bigchaindb.backend.mongodb.connection import _wait_for_replica_set_initialization # noqa
from bigchaindb.backend.localmongodb.connection import _wait_for_replica_set_initialization # noqa
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
@ -170,7 +170,7 @@ def test_wait_for_replica_set_initialization(mongodb_connection):
def test_initialize_replica_set(mock_cmd_line_opts):
from bigchaindb.backend.mongodb.connection import initialize_replica_set
from bigchaindb.backend.localmongodb.connection import initialize_replica_set
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [

View File

@ -71,10 +71,89 @@ def test_get_assets():
assert query.get_asset(conn, asset['id'])
def test_text_search():
from ..mongodb.test_queries import test_text_search
@pytest.mark.parametrize('table', ['assets', 'metadata'])
def test_text_search(table):
from bigchaindb.backend import connect, query
conn = connect()
test_text_search('assets')
# Example data and test cases taken from the MongoDB documentation
# https://docs.mongodb.com/manual/reference/operator/query/text/
objects = [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
{'id': 3, 'subject': 'Baking a cake', 'author': 'abc', 'views': 90},
{'id': 4, 'subject': 'baking', 'author': 'xyz', 'views': 100},
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# insert the example documents
conn.db[table].insert_many(deepcopy(objects), ordered=False)
# test search single word
assert list(query.text_search(conn, 'coffee', table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
]
# match any of the search terms
assert list(query.text_search(conn, 'bake coffee cake', table=table)) == [
{'author': 'abc', 'id': 3, 'subject': 'Baking a cake', 'views': 90},
{'author': 'xyz', 'id': 1, 'subject': 'coffee', 'views': 50},
{'author': 'xyz', 'id': 4, 'subject': 'baking', 'views': 100},
{'author': 'efg', 'id': 2, 'subject': 'Coffee Shopping', 'views': 5},
{'author': 'efg', 'id': 7, 'subject': 'coffee and cream', 'views': 10}
]
# search for a phrase
assert list(query.text_search(conn, '\"coffee shop\"', table=table)) == [
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
# exclude documents that contain a term
assert list(query.text_search(conn, 'coffee -shop', table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
]
# search in a different language
assert list(query.text_search(conn, 'leche', language='es', table=table)) == [
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# case and diacritic insensitive search
assert list(query.text_search(conn, 'сы́рники CAFÉS', table=table)) == [
{'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# case sensitive search
assert list(query.text_search(conn, 'Coffee', case_sensitive=True, table=table)) == [
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
# diacritic sensitive search
assert list(query.text_search(conn, 'CAFÉ', diacritic_sensitive=True, table=table)) == [
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
]
# return text score
assert list(query.text_search(conn, 'coffee', text_score=True, table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50, 'score': 1.0},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5, 'score': 0.75},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10, 'score': 0.75},
]
# limit search result
assert list(query.text_search(conn, 'coffee', limit=2, table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
def test_write_metadata():
@ -114,12 +193,6 @@ def test_get_metadata():
assert query.get_metadata(conn, [meta['id']])
def test_text_metadata():
from ..mongodb.test_queries import test_text_search
test_text_search('metadata')
def test_get_owned_ids(signed_create_tx, user_pk):
from bigchaindb.backend import connect, query
conn = connect()

View File

@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFnTCCA4WgAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwgasxCzAJBgNVBAYTAlVW
MRQwEgYDVQQIDAtDb3NtaWNzdGF0ZTEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBO
b21hZGljIEFudHMgQ29sbGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEc
MBoGA1UEAwwTSW50ZXJtZWRpYXRlIEROQUNMMzEgMB4GCSqGSIb3DQEJARYRZW1w
dHlAZG5hYy1sMy5pb24wHhcNMTcxMjIxMDA1NDA4WhcNMTgxMjMxMDA1NDA4WjCB
rjELMAkGA1UEBhMCVVYxFDASBgNVBAgMC0Nvc21pY3N0YXRlMREwDwYDVQQHDAh6
ZXJvbmVzdDEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBOb21hZGljIEFudHMgQ29s
bGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEOMAwGA1UEAwwFYWxpY2Ux
HjAcBgkqhkiG9w0BCQEWD2FsaWNlQHRoZXJlLnh5ejCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBANflMNZfdb5yySneZxYolgTEV+6kTihIksRiNePJRjfY
r5eT99S03ds1h/B/Y61r0JkxyadquDj3RhReDY25xMGyZS4M2MyLN2phvR1We7RB
cxsjFV/qWpwouPwVx0yeeKRxe0z3hCVKD+B51Au1+b2dbS4pPOxrF9tm9VEhtqcq
cuOpMi8sdAraqGNr3XfzsoHMJ9sqj4bUD8s3SZVfmvnStyIt3liX+fwz5fx94Dc1
KsySHtcqA5FyEHazXVMNWmDiS6ZINaoIOU44FTAN3pWHg4EbzGUmQN5x2Pqb5WBA
tBRqUHwhbg2ORcIhT33UODD0azIdf05WuGotMzvuV9MCAwEAAaOBxTCBwjAJBgNV
HRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIFoDAzBglghkgBhvhCAQ0EJhYkT3BlblNT
TCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmljYXRlMB0GA1UdDgQWBBQQ8b79Kkf1
4S6MhAe3oToDV7BHdzAfBgNVHSMEGDAWgBQCF8NWbOOWvQ4kSeE0KVw/SNrTBjAO
BgNVHQ8BAf8EBAMCBeAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMEMA0G
CSqGSIb3DQEBCwUAA4ICAQCRJoDdgJzsOE8OBWKDaE9KY6Zk+TdnkQKgWQF8VFrh
lEkprl7RuE3HQKno/u2sk7apz6Hs8fZAH87pQKUl8o/30k3ev+s/oXRKPD3i7lsf
QUexGEAETTfCdkuXkIVyvq/o5pEXOuEud0i69svP6v7yJ1otb5aUu0e8wTLNpukl
zICaKzaGuUuoS3vCwMHzBaDpz7ed3kpRS7rUwyMUYQtJ7jpgmGtaw9g4kSXmNyvb
ABpBJCMtMBkL3gZOfTAELmRSZoZczW1aINtqmLB2ejab1AtyjkuO7iUzJjIG/KQi
/x2GCSCL4hBe0YSBObZY0uHBq40qQDxVRhcMe36Ch2vc/TPArl/w9tlZR6FhOm3o
tLsoM1iGj0+oOe9FJA0IAIndP8Hsr1eDJ0tY5IFMY8WsWAh8yLsSYNcdrzv+AYKd
P+QnSgaIFxvvZ/589WThsGgI2VROZHL+JwvoKyvxdEOMcYFNtzS7sdIcWp2KoIwk
xi45upt/KuNEbD9+6oTe8adEoa0g93AZn9jIU3MavpgekLwZsiHJyBWz3HkGMo9l
AZLllLi32dr3i74YCCizRhKfYTC0HAb1Dw0W0s9yl+drtSTf+lzEKegMp4ovwYd8
29y+y4TNjpq5wlJtUZvFUQXxJZk4FGK8jLIG+HsYKefKB9DXCanSrgW2jdnNTKfS
Ig==
-----END CERTIFICATE-----

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1+Uw1l91vnLJKd5nFiiWBMRX7qROKEiSxGI148lGN9ivl5P3
1LTd2zWH8H9jrWvQmTHJp2q4OPdGFF4NjbnEwbJlLgzYzIs3amG9HVZ7tEFzGyMV
X+panCi4/BXHTJ54pHF7TPeEJUoP4HnUC7X5vZ1tLik87GsX22b1USG2pypy46ky
Lyx0CtqoY2vdd/Oygcwn2yqPhtQPyzdJlV+a+dK3Ii3eWJf5/DPl/H3gNzUqzJIe
1yoDkXIQdrNdUw1aYOJLpkg1qgg5TjgVMA3elYeDgRvMZSZA3nHY+pvlYEC0FGpQ
fCFuDY5FwiFPfdQ4MPRrMh1/Tla4ai0zO+5X0wIDAQABAoIBAGPeVVUVG3ZZysxf
YPhTFfJmkPaHctAXoizN8XfJZ3318mP/wqjWcboxbQzHIOjb8saqzUlwiH9xgy1w
xHxG7slbKIrAzpjv3VaMl9q2Ysrja8el1wFjez65Z7jUqji7JbuL8ymLZcmKFQYV
1Q0FxNz9sb+ku9XxRDvtyJUbL07r7nB9ofHhRwUdkzjvFqlP7z9a39r/wP8n0AiO
IMzjwTla+72ad9GGSut4BM3whRnt5/SlaHrgxFUiUZe7V2ONqiHkVv8OPcdJXIhr
qA98ZOS4g+NjOWsJGd6nQqaQUbdoEPsSNeBvRSAyKPkjZRXxdH4M3zRE5xXIO3Rv
2Vw1UiECgYEA7AdxXFOUwpntSKIvkf6HfHNFxqQ7Kt2igLD/oqWbhH9imHFQzIL2
T2Bl3nfz8h/8SLNHnow/CHxY+AtDitqIM0eT5WayKvvj99n/kZ3ktR+W/ogqtmJ1
qPVXoYe/MtnnhUTwN27yh5Qtwkag/f4HUCN+ciRGjLnvqbqeR8BVlKUCgYEA6imi
uIJb3ZFzMqP/mZkveXLtebik6ewq6WL9X9fYxo0dfaJ0prnZOgwF09r23udmzgNs
6TQKO0+t/1+KCnu4WGJP+r1UVlzhT2OZwWQeP6SkWI5HngN6rUKRz9ZpyKbCf3p/
3aXzCIbCSir48/U5NeFV+ZW5zZIT4vjT4OBMeRcCgYEAjYitBoMsKkFpU2vGANLT
ZeItzj7/J+Y9otQAhwStrBdDBUy1+dzyXIjerTfa9a8Odn3jDYTl6229YAhJGUOP
9sgOPLqHTlU7z41OvoAi/CWUQs9eZX3HNmjggoFHOvQF7Bl5vpPuJTTK97uooIYq
1nhiQ4nTMGHzACFi9n20E+0CgYA9MOAq4zXqcqXt59lVZCh5zkSseEnwLx0PnCSm
NRua1ymLuTZOk2ZmutmTATdeDI5a6548WrMJyMqpX2gNUUNdJWPNHEi7wM3IFzkI
xob5Dqc71QJ2EmaufCTqcRd3rcxJz9M2MrWN1tlS1GP2LHDk2ZvD+xJw2+mCeI+1
xid1xQKBgQCfsF4s9hohZmucz6A2DK8n3cPOpfykc+rnV4IMcRXj0S9yJbina5JE
c/yLNHWSf7I4KSvmecfen7yLL7q0mNNzWQLwjZcPHLyJvRsLEo6prYP8f/AVGHYT
jSaFhWM3D2/fh+Bt+eCLE2C/NOF5q8AL792Fi/d/wTdC3ZQLajGMAg==
-----END RSA PRIVATE KEY-----

View File

@ -1,72 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIGQDCCBCigAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgbExCzAJBgNVBAYTAlVW
MRQwEgYDVQQIDAtDb3NtaWNzdGF0ZTERMA8GA1UEBwwIemVyb25lc3QxLjAsBgNV
BAoMJURlY2VudHJhbGl6ZWQgTm9tYWRpYyBBbnRzIENvbGxlY3RpdmUxFjAUBgNV
BAsMDUxlYWRlckxlc3NMYWIxDzANBgNVBAMMBkROQUNMMzEgMB4GCSqGSIb3DQEJ
ARYRZW1wdHlAZG5hYy1sMy5pb24wHhcNMTcxMjIwMjI1NjI2WhcNMjcxMjE4MjI1
NjI2WjCBqzELMAkGA1UEBhMCVVYxFDASBgNVBAgMC0Nvc21pY3N0YXRlMS4wLAYD
VQQKDCVEZWNlbnRyYWxpemVkIE5vbWFkaWMgQW50cyBDb2xsZWN0aXZlMRYwFAYD
VQQLDA1MZWFkZXJMZXNzTGFiMRwwGgYDVQQDDBNJbnRlcm1lZGlhdGUgRE5BQ0wz
MSAwHgYJKoZIhvcNAQkBFhFlbXB0eUBkbmFjLWwzLmlvbjCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBALXhmoEkuqmmmCu87u7lrPMGz5fzs8FWDjzLEwOE
uZeFCB8qvdasDxcTZzOSYXOGv4dI4w7QjM+5RRHVcZimGzchFxEyfxmYxLSIYWbG
nP4NxI0ZeN3GNW2WE98owVhIvjK4fOOwc6VYit0RWgv7Q930Fi/Z2Yi1kXdxPxOi
xXGIt8WH7GFlpgbL8pVLp5b7j+M3PP+fFI6V4eEo3v3cq9bCFjRCbPak6PvJZXEL
uOTAGtiiFvpqQnKRE3Hfk0YUz7Mkwi6ojCfuUJ3HZztSdkeaCOT1qDuUOQzTGmlu
A+ZTMEULPRx86Rt3B+p/0cwGcj5kv/AdbxMKme1J/vdaqPkxudcXdgNdr6rzKUb9
kDkFoIxSa2hzVQhdVWTql9H7nOn9tYxYhIgxG1cEvGX/JTJlGvGj61cs2ZIYfcZe
JDIzklzR2t1Z0b1ObzJApPOjhIgHNGx4mTwvGt7USsBknmAaValuF1Bd0cIYHQbx
TGso9VjcPCAYX9WqBCm+zx5cHJAVeSgKrEds+5VQk8Sq1Ue7vIz0XaautvcYJ2ri
SvLDmKvnB7Uql2zBzaNT0JUunfxgoSH+VWOGYt/iGwq2fv7gwM/1qDw+lWsRKYcP
tfuJzfA9i3PWOAMt6gfzNeAG+CuBP0eniNsbfSZEFSqGQksp9d70oHAi+jypxeTo
kV19AgMBAAGjZjBkMB0GA1UdDgQWBBQCF8NWbOOWvQ4kSeE0KVw/SNrTBjAfBgNV
HSMEGDAWgBTwBHAMCWmWUOt4fwVJ/e9B5Vw//zASBgNVHRMBAf8ECDAGAQH/AgEA
MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAYmglpKWb0/Cd0lmk
D8VgfBsuc5x84z0vVoPjbllLOqfT38ymHh37IFESb4osCX+uaKSs/KMNjSSzKGg+
GeYzf0iR2yGjezwTFDsIORjuPeoaMZjSVgxzenoIVqd8ptjBevkPzPXAlwkrQ5cl
Hsz3DXmBjkQ9dLpPYcqADr4ir8XnQFMFsXvX3rpsBZ5E6J7HeU98BHoPm4K0NrY+
+7P3nhlfyXrp5sSDUmkl2urZmlO8lQrIoGhz7vP/u9AER4YAvNG7WNDOzkMRwa3j
e05n2RCzaoev8q9SEtF5ZoIS3Kr9HjGbZ8m9rRMeKq8BpT0CYwR8aYM8CAtaqkeH
6/kSnU+7igOmNyaDamB5DvZ/cqNRLVqET03POe+n8HXPWT6D2NcCzkC6WAfoDJi5
6ktLszEFMCKSXQyRz80J05L05iGyVwk4PHqH7eVKPFvU14CC00y/v5d3Is/jQTKw
PZ6ma2tQ0Za9YDRQNANhXIq27XH9Hdyh56sD9LU7GoVG1+R1B4c/AL+mBMBZW1dG
iPWI4BSLQe8RR3L+ioXdN3d9CkJYu6V6DEL36GiH48wfar7bJN2+Dl6Lc+wgnomw
SHECBrOlJ+sjnSeqTFZK8fxs3QymRMQftm2CzXC2qVlOCILjnVrzNLo9ZUF+1Ljy
JFIv4reqq/pBswBULIRBV/mw0/U=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIGSjCCBDKgAwIBAgIJAI/Y8AsPkyadMA0GCSqGSIb3DQEBCwUAMIGxMQswCQYD
VQQGEwJVVjEUMBIGA1UECAwLQ29zbWljc3RhdGUxETAPBgNVBAcMCHplcm9uZXN0
MS4wLAYDVQQKDCVEZWNlbnRyYWxpemVkIE5vbWFkaWMgQW50cyBDb2xsZWN0aXZl
MRYwFAYDVQQLDA1MZWFkZXJMZXNzTGFiMQ8wDQYDVQQDDAZETkFDTDMxIDAeBgkq
hkiG9w0BCQEWEWVtcHR5QGRuYWMtbDMuaW9uMB4XDTE3MTIyMDIyMjEyM1oXDTM3
MTIxNTIyMjEyM1owgbExCzAJBgNVBAYTAlVWMRQwEgYDVQQIDAtDb3NtaWNzdGF0
ZTERMA8GA1UEBwwIemVyb25lc3QxLjAsBgNVBAoMJURlY2VudHJhbGl6ZWQgTm9t
YWRpYyBBbnRzIENvbGxlY3RpdmUxFjAUBgNVBAsMDUxlYWRlckxlc3NMYWIxDzAN
BgNVBAMMBkROQUNMMzEgMB4GCSqGSIb3DQEJARYRZW1wdHlAZG5hYy1sMy5pb24w
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDTwGwumwajfqm7N0LKUR4I
zxUL/qdCxOX0TLLGCQ9RppgJVudx0myggYKMupboI0Yf0z8pSsIQauPoLobAeoOg
71Z24iCiVMM7pJitoZxeMTu5ckWP7lDXeFUEt8cIpfmweSfariT2CK3mTgcJXxGk
qv4x4VPpzvT9CdXSYd7Vajl3lclc9RBLy+1iLO88MkcnSmp3/zCujPuBtEfXBzVt
AHgBUojebcEADyT9t33nXuVPUlUfT29LGi9UmohAsPSthj/if8gFfz2M/HXftyJv
qe/TxAcKpJviCOVf+BD9/pcjYREYQyO3V6SzGXi/D6ZlpQMsus27y69gZ8bZ4zKa
q7eneZb8Lrlu4Qb9+sdQORvzB5uJ+lshH6Vwyvkl1ejwsGT8HjqQX5T3UvshFg8n
28CwpvRNDWh7AA/i/+0w4mf6EXfute2M/mQbWPB69fzwv2ReXFTfUZAoT/mq/toU
yrsjw5a1saUlDxqkZn5CNuWS1ERnHbpuYTh4lvuMeJxkCBV9dZCI4PGGHaGAAiGV
PN+1YnYggPpHoeCY6iOyTrKkLEDChEofEvM1Z96W1TFgtyx5vUDYsgtpj/HS5BL+
7ZtjqsS8fXfBO39dgPQ1fd+vQkVeBFi7wvWYgxMpPP3GSP+iwNGDNcZnBzrKpHAt
FYp5AVM35D0oCU2W4sf6zwIDAQABo2MwYTAdBgNVHQ4EFgQU8ARwDAlpllDreH8F
Sf3vQeVcP/8wHwYDVR0jBBgwFoAU8ARwDAlpllDreH8FSf3vQeVcP/8wDwYDVR0T
AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAGcw
isfwKZ4/uhR7WKrViOlhOqcl6ju/f6tZvzM1rJ4OuyKnD4UUKXIAprv2OcjJCkEt
WkOPIIACQkqdbzm5brBJMBKD4PSz7F4gq6h2npw/7XZGVDQbWVRmS9gkS1v5qIh+
bckbyuNcA45o19i6P2JhxYMhQWx7hrv2DDsSEPy43QcdmAa1dQCVtjLb1cnMCvvE
D0gRljyeQwoZBQTFYHTrIBLmZ5s2+eTvor+x+PAK3JX711ldhIOuecGVOXmbT4Ep
dvDQQdXF0fQj0CDXcJk9YYXLy8uOGKaPql4mp+s5sNZ6U1DocZGmYyBhDbeOrW6C
rAFVKTx6akdOdr6NqMTV4ONeaZpNWpYiUetiqYM3FTajVpIomGIo80ulsyKvPZ+7
SR0/DK9QxvbFUskpcPSaxzXUhdLmzyGMjHCDcfEKqVq7NNQTfrTQfkTlSRa5XlMK
5DxYh3Tf2C+I1M2YxCIES3hzNB1ZfkJyTFK/ofsqqXI2BF2VreZ13QSWXQeabMho
iPUINNkwydk8NdCX6nDLlcLNxa/0Xlod3Xpxx2mRr7qS6Mv2uLQJfTsEj2fz4ks3
Bpuk2ySGYtpZQ49iKMG0q/+6Fc4vtrZRy0EgZkCqpCgLey402uk1MthwG1RotaCa
FzSxRdqTYrhc9DrDTLDnkVK36UBkIq1GFStnFMgE
-----END CERTIFICATE-----

View File

@ -1,19 +0,0 @@
-----BEGIN X509 CRL-----
MIIDKDCCARACAQEwDQYJKoZIhvcNAQELBQAwgasxCzAJBgNVBAYTAlVWMRQwEgYD
VQQIDAtDb3NtaWNzdGF0ZTEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBOb21hZGlj
IEFudHMgQ29sbGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEcMBoGA1UE
AwwTSW50ZXJtZWRpYXRlIEROQUNMMzEgMB4GCSqGSIb3DQEJARYRZW1wdHlAZG5h
Yy1sMy5pb24XDTE3MTIyMDIzNTgxMFoXDTE4MDExOTIzNTgxMFqgMDAuMB8GA1Ud
IwQYMBaAFAIXw1Zs45a9DiRJ4TQpXD9I2tMGMAsGA1UdFAQEAgIQADANBgkqhkiG
9w0BAQsFAAOCAgEASR0hqeSHzjupRqnV23EA6SVFpBm6iRJBhAPqN5ZjUtrBGtwT
8ID8T3uMEUNLmw+BbIEEyJwjD4ZuGLoE3nImVEy+b9xoAoyD0OFzmWsLmB9mL31X
hS1Wy9x05Ui7Q0mTVcq5wP5tK86CT8uNLqKbBVPnNJggDkRBGmrw6bJaZIewgw5i
FHvvpefNBNFpZs/LzGtgMsV47TOX5QQ+R10bNcCDgwZ0PlyQKGP0suo8JIJgnX65
gS35dJk3uAKNY949tQX1p0iCfOvqv6VQmEN/Xh1mPleejKhA2tMmI106lsBReHcQ
ftuTGX3GWCLStIQ62vDto+2cr9001HeXBvgp4EupVQmLJ4/RgdmgQ0w0yxFuzbtG
p+Ac1Mf8bUFhT0rMyvqYAkS8cZmbDQe0wt4ZC1Da3bYabAcODKfx+Rtx50Gg3oLD
2xdZ3VD0LLbti68yDRG+0nJj4uPy2db3FP1AFp6Imy/InMvl2L4CC7V8H6tNAHeP
z/SVSbv95+Zz60hZNheAChDL/VZzWi1RNBy01F9Txv8t+lTc1Cit4A63Z/bIPaju
XWq9js3qGhEWyyTIoNEqcUmlipDbHTTPyBpzP/RFp5Q7NrAXWfh9BLtx+u26GKu9
Bi9POgo8g+/gRjl/r3aNMvw5nd5OdyBFoANqFQNlIvaHObd8wYWI6JpeU0w=
-----END X509 CRL-----

View File

@ -1,63 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAuD40mDMjJDc1jVggh2fNn7b0q+Hwvpm1ZOWmlDwZrud5g6YY
JZQ/MfkZcGoFzTO8ZhF8n2Rw8ZQRcCB5co513Ljw3o2yDXTrtF355kB0EaD9zXPg
t9yLliU1Mm6ovJMII/U1uCGTjlhfIRpPmuNSFPqfsstEii8IuICVqISb2DTDjfZH
lUd4v3HQVsSQARz/g3K/vWOXQQv5p8pHDfz75wdMlEmSO5H49Lo423HwTxY14ZRt
Oc+yCc5LXNEti6aX0xI1pKrLK6OcT3bdiyAT755k31PpzwhaYm0C4/09TJR0SGqG
d199FJxtZtn1XOczks2X4I6FCtI1mOZjsIISDQIDAQABAoIBAHUZ5GfQKMe952Wc
25/0U/btW/lbM56RJ3+njwasZE6MhOktyi8oWDo07vxKBsHz3gyplKHiPv7sc2Dm
4wnyjerqCP4K5V40MBOFn22/DYae4nGQ8wUpQfb+Ux9owgqJESV5ltkfTh+s/1mU
1X/Gd8cpvBLxqvCBEHPkFjrzffIeEphlPjUi+hfN9tlNtkSm7hBMYD9eY5kLZDNN
eLicCXLgNQgf2cIyITp91hZTiWJORZyK7WxVnoOKYG7wASgniGN5cLxu205JIMpo
JeWFHtvOZitMUhhXSq+VZE9lycq9Y6lObhnb/8CrOd65o5EXg6CSHFGxa4p93xn7
BqT2RIECgYEA5lFo5R4TJpWbxs58v/HgL5JpRg5KoV11XV+jZLCE7LjjtYKDhobZ
tgG5ldEs9bpG+DEp/LpJgJnsHL75kQb1WFD/W5azm2Mqk4D0s/Dk4xj9H3RnNK08
JSKD5e4/8EFqPG/OG4ekqF/MRVB1Ag/uEE2t4IGkGazvlNlCOFXJiVECgYEAzMmN
QgzMwWk/sWe0N1hBYWHSwu6Xu0Xw0vzz++gm6K1Jb9tT/c2/FkPTjzy0auEq/KxR
vtzMvhJsib1LSdGMYb8m/yO2T/4ixtPWWMSVEMzpaVko+kgBEYZEnQwY9yVNW5YT
RSmDPNzEfQk1VJXU+71ArWD4U3jehmVh3SeITf0CgYEA1CC9FPFrQAlhbW03Nu6G
xin6bfyxvge/Fh4E9rMh6omrqLypeRjwohLapGMBHt3Pib52ZoAJPLoH3r4CTBH1
nL8VQnz9tatTRoHqX2UK2yNj1zm16K15jNZKV1blatpFB58OXQrEnBOHp3ugGSMk
570z3WhqBP/jtduYgxx4P9ECgYA1mIpfvvnXpLQGuh1wXqi909xVKByN5sAgeBNi
2l9UhgWyQLS6uB5KtXWpBzowvr2BY5gBW8g59phxdBlAOJeE/YI3RCFyzhBL+SUg
FlLtErQD3lHbLv0YmQtcDs554EleruhcMYEnbKAAGWjbulsDh4Rl2/sh3YBfrCjw
uGf0AQKBgQC4gUBKk5s9KLOtc9yValKHvU04P1wQWaoBPR6dTCrw/YAktqR24Zxl
z4V6pfvLkVWPVAZ2dZxerVG0hulP+tvwW2e76RNgFFlnNN0RRz07qWEwkKgJrEYW
1s95T7W0IigNugIUb2rUJ0Ld5Taa7C4z21IDU8lvO2JYBYFEZ9RaaQ==
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIGXDCCBESgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwgasxCzAJBgNVBAYTAlVW
MRQwEgYDVQQIDAtDb3NtaWNzdGF0ZTEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBO
b21hZGljIEFudHMgQ29sbGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEc
MBoGA1UEAwwTSW50ZXJtZWRpYXRlIEROQUNMMzEgMB4GCSqGSIb3DQEJARYRZW1w
dHlAZG5hYy1sMy5pb24wHhcNMTcxMjIxMDY0MjU4WhcNMTgxMjMxMDY0MjU4WjCB
tDELMAkGA1UEBhMCVVYxFDASBgNVBAgMC0Nvc21pY3N0YXRlMREwDwYDVQQHDAh6
ZXJvbmVzdDEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBOb21hZGljIEFudHMgQ29s
bGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjESMBAGA1UEAwwJbG9jYWxo
b3N0MSAwHgYJKoZIhvcNAQkBFhFlbXB0eUBkbmFjLWwzLmlvbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBALg+NJgzIyQ3NY1YIIdnzZ+29Kvh8L6ZtWTl
ppQ8Ga7neYOmGCWUPzH5GXBqBc0zvGYRfJ9kcPGUEXAgeXKOddy48N6Nsg1067Rd
+eZAdBGg/c1z4Lfci5YlNTJuqLyTCCP1Nbghk45YXyEaT5rjUhT6n7LLRIovCLiA
laiEm9g0w432R5VHeL9x0FbEkAEc/4Nyv71jl0EL+afKRw38++cHTJRJkjuR+PS6
ONtx8E8WNeGUbTnPsgnOS1zRLYuml9MSNaSqyyujnE923YsgE++eZN9T6c8IWmJt
AuP9PUyUdEhqhndffRScbWbZ9VznM5LNl+COhQrSNZjmY7CCEg0CAwEAAaOCAX0w
ggF5MAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQm
FiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYE
FKshZxJ5Md78vhJwU4B+l+yhBAM0MIHfBgNVHSMEgdcwgdSAFAIXw1Zs45a9DiRJ
4TQpXD9I2tMGoYG3pIG0MIGxMQswCQYDVQQGEwJVVjEUMBIGA1UECAwLQ29zbWlj
c3RhdGUxETAPBgNVBAcMCHplcm9uZXN0MS4wLAYDVQQKDCVEZWNlbnRyYWxpemVk
IE5vbWFkaWMgQW50cyBDb2xsZWN0aXZlMRYwFAYDVQQLDA1MZWFkZXJMZXNzTGFi
MQ8wDQYDVQQDDAZETkFDTDMxIDAeBgkqhkiG9w0BCQEWEWVtcHR5QGRuYWMtbDMu
aW9uggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJ
KoZIhvcNAQELBQADggIBABafbpGaUsKK5ydJeotIvF4Rf6/KHo0kBuhOgdvpXaVx
Epz2QFzMiHSa2WXFyc9oLsX7VecbFVSCzDmXYzF2wJ3zRmrQqfgbbrKMuZf3Isqp
XdtiYrdZvPJlPDaB8zWsmR/G9guumZicbhMR5OyrDJjYP4mHiBAnSsJpxO1ZaJHo
LBOjzLXeCOHRCcruMztOPS0+dGL/QL8hZpxrfBXWnDEBCQ5fre63j8aKKy4jwpjF
akjI/Q62RcUvfhMhBm1jFYhnycnPz7z76tEvm10eQpNuRJZfreY5GzQSPpMd8r8w
z9OC49HL5I7TpDW69Il6Ie+tEtbkyZ2411orKirTq1Yi15cmUHQZ/VHLPpVRorKJ
tUxH/otYCebB/W1afX1Y5rCAgC9/NCTlfN2oV08/TgP4jam0p3xLyLPG2qQa0Aza
96IbJdIH9mJkxK6BujFXiI1LNVgskFyYDlseXGUvn4Uwv4+uW/9HUNP39n8BVMwv
vqabJzoqTkcjQ00PmzcHu+PtwyImDqnD1oPL4yj32yEXN3LmKNQ7mG2YU+4kxg+e
ll5oxBu0Rtblp/fLo0UtuuCEarJglVduAA2ARLktzzV5ekIZDshUMttkY4HbcUMA
KJG8+Ci0PAhgLBd9LjagdBtqUIv4DK9+ZuoHLLyGNaqcOnmm/tRqdacUnKTPhhce
-----END CERTIFICATE-----

View File

@ -1,63 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAve+E6Vsf4i7YNnqn60NBihi5wFG44KGb+zeU1W45Fzr2wL4l
aQHXie7wsvdJrtw0j19Qb+mUTIQ9qaysZA04rZEYZLj6zrMC5zy41OfqMhO6vXZ6
f6SAL0Z+BfIroNj/hukcdBzMkXzZcrL/WG5Tr9SKfUY3ue4a4HW9PhHHp7cfB2AK
j/YxKioHIXQeKffv8q5EJ+HuHDOMFl56jubPiThDsDX90fzDUKsYTr7AobGVYCk6
sqQ+tCiOS98KZjQvmn9ufAReogAA4aKty79aM8j5PCbhykrcneTJG4Yi1Luxf20B
Z1rtO/g1T5XqxCAlHLSv1vTc1id0Yhna4HqRzQIDAQABAoIBAA87jmVIsZ3qM9zq
qVzs3+y7XupVo4Ygb+oN4xna5Sk/yVpb5ZQuWaLuTs+LlZU4g96bvk+7Y/FfCbL5
7jl39fu1j0dErpi1Mp5o+Ena76Vkf62COwDRqJYiCSlg6q7TuGqRd364cI+ae7X/
NQtajcukOBcj8oQu3Gj0CGFZ+7EvkxFV4rioRHKbwV16MEXMl6DyWzkMtlYVn89k
vX/rvuvtLvxY5FL4zHIarm9hBf0QFPq2lqfunCPWv+UM6rzFAYsqPC0evio00TEN
Sgq7B9L6tJGj+ZPjDfqo5+OG43Y/VSnybhM9FSW8pkV7zRTqnvudLPsyPWHHAxKJ
5NxUvwECgYEA7EKfIkBBMaFDGg3LeN7xzj569eQz5lTwZcBXNwd2O8I9A7Zs7fnE
3dGz8aIcoZToS3BJX/mQULNmhEqSQgNVYNNY1+ezFiZ1iohA+tiLfa0ut+G1QWfU
B2RdqkIK6VaRqFc8ct9QYHQ4lwTNwBqjJL5M7LUEHTOKMAs3JAC461UCgYEAzc4P
B43nyy5+Tm3ibGee/+yaXCA5jhJFlxcStwcvLCV3aAj82kY9YUNkOg52OBSsWvIJ
3rEHQzeO0CbckuwKJBpaDdf5c6//lNh7Q+YY2eUqDuB0+gLGOwjgOei8vCpA2+Uw
jkFcW4wLZ6MhqUNqG7rwgBg8s8ADgjfcgluWPJkCgYEAtt3iGjqtu2Jcc8AVq5q0
grRtrAFUAoXgBIHZf6hw5VQ0sbjzm0bmD+MOaorVqmN9YVaqFHUwNKaHRQILLrkx
zl4HuCWIwi5GcFGCKiaN4Vq4fG9hQhzBSm9u6CdyFqWA3FGZYjhjGCUc4BdBwd1c
RUZeeCCr7IZGAWXM6jr85nkCgYEAlx0bSge3UChGU945aDG5vN8FBGog+KGP+X3W
nw+mYnrsLzaSOs9iWF18ZWijA1CisXCRMLO5ynAr68jtIvAwDgxPXWTc5EYxmyBN
ejCkxLGQ7TWAjuakmUpOIhczJdK8pvmlan6xiwLMiP9uQvOeINhlqRii0bVGnkHY
JtuK99kCgYEAiOU8CUf1XeucHXqvjZ6G2J0OQLr9fr8grL8o1GOekjWw6rrWDndR
aeE17Eb6coXgKcKBKuUM0hRcaWdlfzU2U97ZGaynGVW5kAUSwK1huKBeiwUsqhhD
Ur/cQ5+7852Dkt3+3PzkMEZiXUbZLzQs/VHh1G3c+aiDb1ff/jKttqU=
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIGVjCCBD6gAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwgasxCzAJBgNVBAYTAlVW
MRQwEgYDVQQIDAtDb3NtaWNzdGF0ZTEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBO
b21hZGljIEFudHMgQ29sbGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEc
MBoGA1UEAwwTSW50ZXJtZWRpYXRlIEROQUNMMzEgMB4GCSqGSIb3DQEJARYRZW1w
dHlAZG5hYy1sMy5pb24wHhcNMTcxMjIxMDQzNzQ5WhcNMTgxMjMxMDQzNzQ5WjCB
rjELMAkGA1UEBhMCVVYxFDASBgNVBAgMC0Nvc21pY3N0YXRlMREwDwYDVQQHDAh6
ZXJvbmVzdDEuMCwGA1UECgwlRGVjZW50cmFsaXplZCBOb21hZGljIEFudHMgQ29s
bGVjdGl2ZTEWMBQGA1UECwwNTGVhZGVyTGVzc0xhYjEMMAoGA1UEAwwDbWRiMSAw
HgYJKoZIhvcNAQkBFhFlbXB0eUBkbmFjLWwzLmlvbjCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAL3vhOlbH+Iu2DZ6p+tDQYoYucBRuOChm/s3lNVuORc6
9sC+JWkB14nu8LL3Sa7cNI9fUG/plEyEPamsrGQNOK2RGGS4+s6zAuc8uNTn6jIT
ur12en+kgC9GfgXyK6DY/4bpHHQczJF82XKy/1huU6/Uin1GN7nuGuB1vT4Rx6e3
HwdgCo/2MSoqByF0Hin37/KuRCfh7hwzjBZeeo7mz4k4Q7A1/dH8w1CrGE6+wKGx
lWApOrKkPrQojkvfCmY0L5p/bnwEXqIAAOGircu/WjPI+Twm4cpK3J3kyRuGItS7
sX9tAWda7Tv4NU+V6sQgJRy0r9b03NYndGIZ2uB6kc0CAwEAAaOCAX0wggF5MAkG
A1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG+EIBDQQmFiRPcGVu
U1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFM+nlfQo
sgAM8pg4f990+am3gNKzMIHfBgNVHSMEgdcwgdSAFAIXw1Zs45a9DiRJ4TQpXD9I
2tMGoYG3pIG0MIGxMQswCQYDVQQGEwJVVjEUMBIGA1UECAwLQ29zbWljc3RhdGUx
ETAPBgNVBAcMCHplcm9uZXN0MS4wLAYDVQQKDCVEZWNlbnRyYWxpemVkIE5vbWFk
aWMgQW50cyBDb2xsZWN0aXZlMRYwFAYDVQQLDA1MZWFkZXJMZXNzTGFiMQ8wDQYD
VQQDDAZETkFDTDMxIDAeBgkqhkiG9w0BCQEWEWVtcHR5QGRuYWMtbDMuaW9uggIQ
ADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcN
AQELBQADggIBAJak6j+P+NoPsNR5uNxSZpVDmnE6uthhzYi7q1Svu62y4H8xt9sS
0QCpTDPntdBErpeq2nysYm9u+d0PUBfHD3iqlgHrEqFnZbixAcrB0DfqUWnclALD
GAyy/6R93wKfXAQrzAbXTmMDwfkw1s0BDVhejAMVOniGcNl13LY0g935EgC14x1y
JADeB7hWcPQO4HTPUm1X1vAGwAlXFd4Q8vY/kWqCezc9/TZRuzK/oRThLA1GNegc
BZpzbHF4wv9SG95SEsCDAUN6Z350ylVyuRfOd68r8nq20NSBybwYhJALinYHJPxY
4vgcAykqWp2Aff1HaKat7ps6iJBic8n+SNOmQNs2LWCppTjNLDaZPE8TIMF74pix
NUvR6OCF433cYIBXRPRk6J4U67grC+r/yUTieUOIrfAMoXrM9VhqErJW1yk/H4/O
yAsVUS26it3R+FbCSg1Pa2Dt9dgt401Y2xaEQ4nEC6zQblwQHq9rM364Ji47uFM6
gsVLJXysJ/i+vFcHHIufCOJml0+azDzfD3XVRFChenQwwJVmKgwCtNEgy3+EoKbx
zkV4w4eagLQZsyCtl5Ed2iawMQVV9ECAsqKU+IUN93rGHpBMEBzNpN94YehTcYC4
gfQmny5f0yXm7XE2exjFbkPXpqOpBajFl2Wmaz6yqc+0CNeRxRpj4ksX
-----END CERTIFICATE-----

View File

@ -1,180 +0,0 @@
from unittest import mock
import pytest
import pymongo
from pymongo.database import Database
from ssl import CERT_REQUIRED
pytestmark = pytest.mark.bdb_ssl
@pytest.fixture
def mock_ssl_cmd_line_opts(ssl_context, mdb_ssl_pem_key):
return {'argv': [
'mongod',
'--dbpath=/data',
'--replSet=bigchain-rs',
'--sslMode=requireSSL',
'--sslAllowInvalidHostnames',
'--sslCAFile=' + ssl_context.ca,
'--sslCRLFile=' + ssl_context.crl,
'--sslPEMKeyFile=' + mdb_ssl_pem_key,
'--sslPEMKeyPassword=""'
],
'ok': 1.0,
'parsed': {'replication': {'replSet': 'bigchain-rs'},
'storage': {'dbPath': '/data'}}
}
def test_ssl_get_connection_returns_the_correct_instance(db_host, db_port, ssl_context):
from bigchaindb.backend import connect
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.mongodb.connection import MongoDBConnection
config = {
'backend': 'mongodb',
'host': db_host,
'port': db_port,
'name': 'test',
'replicaset': 'bigchain-rs',
'ssl': True,
'ca_cert': ssl_context.ca,
'crlfile': ssl_context.crl,
'certfile': ssl_context.cert,
'keyfile': ssl_context.key,
'keyfile_passphrase': ''
}
conn = connect(**config)
assert isinstance(conn, Connection)
assert isinstance(conn, MongoDBConnection)
assert conn.conn._topology_settings.replica_set_name == config['replicaset']
@mock.patch('pymongo.database.Database.authenticate')
def test_ssl_connection_with_credentials(mock_authenticate):
import bigchaindb
from bigchaindb.backend.mongodb.connection import MongoDBConnection
conn = MongoDBConnection(host=bigchaindb.config['database']['host'],
port=bigchaindb.config['database']['port'],
login='theplague',
password='secret',
ssl=bigchaindb.config['database']['ssl'],
ssl_ca_certs=bigchaindb.config['database']['ca_cert'],
ssl_certfile=bigchaindb.config['database']['certfile'],
ssl_keyfile=bigchaindb.config['database']['keyfile'],
ssl_pem_passphrase=bigchaindb.config['database']['keyfile_passphrase'],
ssl_crlfile=bigchaindb.config['database']['crlfile'],
ssl_cert_reqs=CERT_REQUIRED)
conn.connect()
assert mock_authenticate.call_count == 2
def test_ssl_initialize_replica_set(mock_ssl_cmd_line_opts, ssl_context):
from bigchaindb.backend.mongodb.connection import initialize_replica_set
from bigchaindb.common.exceptions import ConfigurationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_ssl_cmd_line_opts,
None,
{'log': ['database writes are now permitted']},
]
# check that it returns
assert initialize_replica_set('host',
1337,
1000,
'dbname',
True,
None,
None,
ssl_context.ca,
ssl_context.cert,
ssl_context.key,
'',
ssl_context.crl) is None
# test that it raises OperationFailure if anything goes wrong
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_ssl_cmd_line_opts,
pymongo.errors.OperationFailure(None, details={'codeName': ''})
]
with pytest.raises(pymongo.errors.OperationFailure):
initialize_replica_set('host',
1337,
1000,
'dbname',
True,
None,
None,
ssl_context.ca,
ssl_context.cert,
ssl_context.key,
'',
ssl_context.crl) is None
# pass an explicit ssl=False so that pymongo throws a
# ConfigurationError
with pytest.raises(ConfigurationError):
initialize_replica_set('host',
1337,
1000,
'dbname',
False,
None,
None,
ssl_context.ca,
ssl_context.cert,
ssl_context.key,
'',
ssl_context.crl) is None
def test_ssl_invalid_configuration(db_host, db_port, ssl_context):
from bigchaindb.backend import connect
from bigchaindb.common.exceptions import ConfigurationError
config = {
'backend': 'mongodb',
'host': db_host,
'port': db_port,
'name': 'test',
'replicaset': 'bigchain-rs',
'ssl': False,
'ca_cert': ssl_context.ca,
'crlfile': ssl_context.crl,
'certfile': ssl_context.cert,
'keyfile': ssl_context.key,
'keyfile_passphrase': ''
}
with pytest.raises(ConfigurationError):
conn = connect(**config)
assert conn.conn._topology_settings.replica_set_name == config['replicaset']
def test_ssl_connection_with_wrong_credentials():
import bigchaindb
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.exceptions import ConnectionError
conn = MongoDBConnection(host=bigchaindb.config['database']['host'],
port=bigchaindb.config['database']['port'],
login='my_login',
password='my_super_secret_password',
ssl=bigchaindb.config['database']['ssl'],
ssl_ca_certs=bigchaindb.config['database']['ca_cert'],
ssl_certfile=bigchaindb.config['database']['certfile'],
ssl_keyfile=bigchaindb.config['database']['keyfile'],
ssl_pem_passphrase=bigchaindb.config['database']['keyfile_passphrase'],
ssl_crlfile=bigchaindb.config['database']['crlfile'],
ssl_cert_reqs=CERT_REQUIRED)
with pytest.raises(ConnectionError):
conn._connect()

View File

@ -1,108 +0,0 @@
"""Tests for the :mod:`bigchaindb.backend.mongodb.admin` module."""
import copy
from unittest import mock
import pytest
from pymongo.database import Database
from pymongo.errors import OperationFailure
@pytest.fixture
def mock_replicaset_config():
return {
'config': {
'_id': 'bigchain-rs',
'members': [
{
'_id': 0,
'arbiterOnly': False,
'buildIndexes': True,
'hidden': False,
'host': 'localhost:27017',
'priority': 1.0,
'slaveDelay': 0,
'tags': {},
'votes': 1
}
],
'version': 1
}
}
@pytest.fixture
def connection():
from bigchaindb.backend import connect
connection = connect()
# connection is a lazy object. It only actually creates a connection to
# the database when it's first used.
# During the setup of a MongoDBConnection some `Database.command` calls
# are executed to make sure that the replica set is correctly initialized.
# Here we force the connection setup so that all required
# `Database.command` calls are executed before we mock them in the tests.
connection.connect()
return connection
def test_add_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
expected_config['config']['version'] += 1
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
add_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_add_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import add_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
add_replicas(connection, ['localhost:27018'])
def test_remove_replicas(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
expected_config = copy.deepcopy(mock_replicaset_config)
expected_config['config']['version'] += 1
# add some hosts to the configuration to remove
mock_replicaset_config['config']['members'] += [
{'_id': 1, 'host': 'localhost:27018'},
{'_id': 2, 'host': 'localhost:27019'}
]
with mock.patch.object(Database, 'command') as mock_command:
mock_command.return_value = mock_replicaset_config
remove_replicas(connection, ['localhost:27018', 'localhost:27019'])
mock_command.assert_called_with('replSetReconfig',
expected_config['config'])
def test_remove_replicas_raises(mock_replicaset_config, connection):
from bigchaindb.backend.admin import remove_replicas
from bigchaindb.backend.exceptions import OperationError
with mock.patch.object(Database, 'command') as mock_command:
mock_command.side_effect = [
mock_replicaset_config,
OperationFailure(error=1, details={'errmsg': ''})
]
with pytest.raises(OperationError):
remove_replicas(connection, ['localhost:27018'])

View File

@ -1,149 +0,0 @@
from unittest import mock
import pytest
from multipipes import Pipe
@pytest.fixture
def mock_changefeed_data():
return [
{
'op': 'i',
'o': {'_id': '', 'msg': 'seems like we have an insert here'},
'ts': 1,
},
{
'op': 'd',
'o': {'msg': 'seems like we have a delete here'},
'ts': 2,
},
{
'op': 'u',
'o': {'msg': 'seems like we have an update here'},
'o2': {'_id': 'some-id'},
'ts': 3,
},
]
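The records above follow the MongoDB oplog layout: 'op' is the operation code ('i' insert, 'u' update, 'd' delete), 'o' carries the document, and for updates 'o2' holds the selector with the updated document's _id. A tiny self-contained dispatch sketch over that layout:

def classify_oplog_record(record):
    # map MongoDB oplog operation codes to human-readable names
    return {'i': 'insert', 'u': 'update', 'd': 'delete'}[record['op']]

assert classify_oplog_record({'op': 'i', 'o': {}, 'ts': 1}) == 'insert'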
@pytest.mark.bdb
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
@mock.patch('pymongo.cursor.Cursor.next')
def test_changefeed_insert(mock_cursor_next, mock_changefeed_data):
from bigchaindb.backend import get_changefeed, connect
from bigchaindb.backend.changefeed import ChangeFeed
# setup connection and mocks
conn = connect()
# mock the `next` method of the cursor to return the mocked data
mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
outpipe = Pipe()
changefeed = get_changefeed(conn, 'backlog', ChangeFeed.INSERT)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get()['msg'] == 'seems like we have an insert here'
assert outpipe.qsize() == 0
@pytest.mark.bdb
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
@mock.patch('pymongo.cursor.Cursor.next')
def test_changefeed_delete(mock_cursor_next, mock_changefeed_data):
from bigchaindb.backend import get_changefeed, connect
from bigchaindb.backend.changefeed import ChangeFeed
conn = connect()
mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
outpipe = Pipe()
changefeed = get_changefeed(conn, 'backlog', ChangeFeed.DELETE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get()['msg'] == 'seems like we have a delete here'
assert outpipe.qsize() == 0
@pytest.mark.bdb
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
@mock.patch('pymongo.collection.Collection.find_one')
@mock.patch('pymongo.cursor.Cursor.next')
def test_changefeed_update(mock_cursor_next, mock_cursor_find_one,
mock_changefeed_data):
from bigchaindb.backend import get_changefeed, connect
from bigchaindb.backend.changefeed import ChangeFeed
conn = connect()
mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
mock_cursor_find_one.return_value = mock_changefeed_data[2]['o']
outpipe = Pipe()
changefeed = get_changefeed(conn, 'backlog', ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get()['msg'] == 'seems like we have an update here'
assert outpipe.qsize() == 0
assert mock_cursor_find_one.called_once_with(
{'_id': mock_changefeed_data[2]['o']},
{'_id': False}
)
@pytest.mark.bdb
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
@mock.patch('pymongo.collection.Collection.find_one')
@mock.patch('pymongo.cursor.Cursor.next')
def test_changefeed_multiple_operations(mock_cursor_next, mock_cursor_find_one,
mock_changefeed_data):
from bigchaindb.backend import get_changefeed, connect
from bigchaindb.backend.changefeed import ChangeFeed
conn = connect()
mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
mock_cursor_find_one.return_value = mock_changefeed_data[2]['o']
outpipe = Pipe()
changefeed = get_changefeed(conn, 'backlog',
ChangeFeed.INSERT | ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get()['msg'] == 'seems like we have an insert here'
assert outpipe.get()['msg'] == 'seems like we have an update here'
assert outpipe.qsize() == 0
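The operation argument is a bit mask, which is why handlers can be combined with bitwise OR as above. A toy sketch of the same flag scheme; the concrete numeric values are an assumption, not necessarily the ones ChangeFeed uses:

INSERT, DELETE, UPDATE = 1, 2, 4

def wants(mask, operation):
    # a combined mask selects an operation iff its bit is set
    return bool(mask & operation)

assert wants(INSERT | UPDATE, UPDATE)
assert not wants(INSERT | UPDATE, DELETE)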
@pytest.mark.bdb
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
@mock.patch('pymongo.cursor.Cursor.next')
def test_changefeed_prefeed(mock_cursor_next, mock_changefeed_data):
from bigchaindb.backend import get_changefeed, connect
from bigchaindb.backend.changefeed import ChangeFeed
conn = connect()
mock_cursor_next.side_effect = [mock.DEFAULT] + mock_changefeed_data
outpipe = Pipe()
changefeed = get_changefeed(conn, 'backlog', ChangeFeed.INSERT,
prefeed=[1, 2, 3])
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.qsize() == 4
@pytest.mark.bdb
def test_connection_failure():
from bigchaindb.backend.exceptions import ConnectionError
from bigchaindb.backend.mongodb.changefeed import run_changefeed
conn = mock.MagicMock()
conn.run.side_effect = [ConnectionError(), RuntimeError()]
changefeed = run_changefeed(conn, 'backlog', -1)
with pytest.raises(RuntimeError):
for record in changefeed:
assert False, 'Shouldn\'t get here'
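The test encodes the retry contract of run_changefeed: reconnect on the backend's ConnectionError, re-raise anything else. A generator sketch of that contract, with the builtin ConnectionError standing in for bigchaindb.backend.exceptions.ConnectionError:

import time

def run_changefeed_sketch(conn, table, last_ts):
    while True:
        try:
            # hand records through as long as the cursor stays alive
            yield from conn.run(table, last_ts)
        except ConnectionError:
            time.sleep(1)  # back off, then resume with a fresh cursor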

View File

@ -1,24 +0,0 @@
import pytest
from unittest.mock import MagicMock
pytestmark = pytest.mark.bdb
@pytest.mark.skip(reason='Will be handled by #1126')
def test_asset_id_index():
from bigchaindb.backend.mongodb.query import get_txids_filtered
from bigchaindb.backend import connect
# Passes a mock in place of a connection to get the query params from the
# query function, then gets the explain plan from MongoDB to test that
# it's using certain indexes.
m = MagicMock()
get_txids_filtered(m, '')
pipeline = m.db['bigchain'].aggregate.call_args[0][0]
run = connect().db.command
res = run('aggregate', 'bigchain', pipeline=pipeline, explain=True)
stages = (res['stages'][0]['$cursor']['queryPlanner']['winningPlan']
['inputStage']['inputStages'])
indexes = [s['inputStage']['indexName'] for s in stages]
assert set(indexes) == {'asset_id', 'transaction_id'}

View File

@ -1,617 +0,0 @@
from copy import deepcopy
import pytest
from unittest import mock
import pymongo
pytestmark = pytest.mark.bdb
def test_write_transaction(signed_create_tx):
from bigchaindb.backend import connect, query
conn = connect()
# write the transaction
query.write_transaction(conn, signed_create_tx.to_dict())
# get the transaction
tx_db = conn.db.backlog.find_one({'id': signed_create_tx.id},
{'_id': False})
assert tx_db == signed_create_tx.to_dict()
def test_update_transaction(signed_create_tx):
from bigchaindb.backend import connect, query
conn = connect()
# update_transaction can update any field we want, but let's update the
# same fields that are updated by bigchaindb core.
signed_create_tx = signed_create_tx.to_dict()
signed_create_tx.update({'assignee': 'aaa', 'assignment_timestamp': 10})
conn.db.backlog.insert_one(signed_create_tx)
query.update_transaction(conn, signed_create_tx['id'],
{'assignee': 'bbb', 'assignment_timestamp': 20})
tx_db = conn.db.backlog.find_one({'id': signed_create_tx['id']},
{'_id': False})
assert tx_db['assignee'] == 'bbb'
assert tx_db['assignment_timestamp'] == 20
def test_delete_transaction(signed_create_tx):
from bigchaindb.backend import connect, query
conn = connect()
# write the transaction
result = conn.db.backlog.insert_one(signed_create_tx.to_dict())
# delete transaction
query.delete_transaction(conn, signed_create_tx.id)
tx_db = conn.db.backlog.find_one({'_id': result.inserted_id})
assert tx_db is None
def test_get_stale_transactions(signed_create_tx):
import time
from bigchaindb.backend import connect, query
conn = connect()
# create two transactions, one of them stale
tx1 = signed_create_tx.to_dict()
tx1.update({'id': 'notstale', 'assignment_timestamp': time.time()})
tx2 = signed_create_tx.to_dict()
tx2.update({'id': 'stale', 'assignment_timestamp': time.time() - 60})
# write the transactions
conn.db.backlog.insert_one(tx1)
conn.db.backlog.insert_one(tx2)
# get stale transactions
stale_txs = list(query.get_stale_transactions(conn, 30))
assert len(stale_txs) == 1
assert stale_txs[0]['id'] == 'stale'
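A sketch of the query presumably behind get_stale_transactions: select backlog entries whose assignment is older than the reassignment delay. Collection and field names follow the test above; the function name is illustrative.

import time

def get_stale_transactions_sketch(conn, reassign_delay):
    cutoff = time.time() - reassign_delay
    # everything assigned before the cutoff counts as stale
    return conn.db.backlog.find({'assignment_timestamp': {'$lt': cutoff}})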
def test_get_transaction_from_block(user_pk):
from bigchaindb.backend import connect, query
from bigchaindb.models import Transaction, Block
conn = connect()
# create a block with 2 transactions
txs = [
Transaction.create([user_pk], [([user_pk], 1)]),
Transaction.create([user_pk], [([user_pk], 1)]),
]
block = Block(transactions=txs)
conn.db.bigchain.insert_one(block.to_dict())
tx_db = query.get_transaction_from_block(conn, txs[0].id, block.id)
assert tx_db == txs[0].to_dict()
assert query.get_transaction_from_block(conn, txs[0].id, 'aaa') is None
assert query.get_transaction_from_block(conn, 'aaa', block.id) is None
def test_get_transaction_from_backlog(create_tx):
from bigchaindb.backend import connect, query
conn = connect()
# insert transaction
conn.db.backlog.insert_one(create_tx.to_dict())
# query the backlog
tx_db = query.get_transaction_from_backlog(conn, create_tx.id)
assert tx_db == create_tx.to_dict()
def test_get_block_status_from_transaction(create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create a block
block = Block(transactions=[create_tx], voters=['aaa', 'bbb', 'ccc'])
# insert block
conn.db.bigchain.insert_one(block.to_dict())
block_db = list(query.get_blocks_status_from_transaction(conn,
create_tx.id))
assert len(block_db) == 1
block_db = block_db.pop()
assert block_db['id'] == block.id
assert block_db['block']['voters'] == block.voters
def test_get_asset_by_id(create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create asset and block
create_tx.asset = {'msg': 'aaa'}
block = Block(transactions=[create_tx])
conn.db.bigchain.insert_one(block.to_dict())
asset = list(query.get_asset_by_id(conn, create_tx.id))
assert len(asset) == 1
assert asset[0]['asset'] == create_tx.asset
def test_get_spent(signed_create_tx, signed_transfer_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert two blocks, one for the create and one for the
# transfer transaction
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
block = Block(transactions=[signed_transfer_tx])
conn.db.bigchain.insert_one(block.to_dict())
spents = list(query.get_spent(conn, signed_create_tx.id, 0))
assert len(spents) == 1
assert spents[0] == signed_transfer_tx.to_dict()
def test_get_spent_for_tx_with_multiple_inputs(carol):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block, Transaction
conn = connect()
tx_0 = Transaction.create(
[carol.public_key],
[([carol.public_key], 1),
([carol.public_key], 1),
([carol.public_key], 2)],
).sign([carol.private_key])
block = Block(transactions=[tx_0])
conn.db.bigchain.insert_one(block.to_dict())
spents = list(query.get_spent(conn, tx_0.id, 0))
assert not spents
tx_1 = Transaction.transfer(
tx_0.to_inputs()[2:3],
[([carol.public_key], 1),
([carol.public_key], 1)],
asset_id=tx_0.id,
).sign([carol.private_key])
block = Block(transactions=[tx_1])
conn.db.bigchain.insert_one(block.to_dict())
spents = list(query.get_spent(conn, tx_0.id, 0))
assert not spents
tx_2 = Transaction.transfer(
tx_0.to_inputs()[0:1] + tx_1.to_inputs()[1:2],
[([carol.public_key], 2)],
asset_id=tx_0.id,
).sign([carol.private_key])
block = Block(transactions=[tx_2])
conn.db.bigchain.insert_one(block.to_dict())
spents = list(query.get_spent(conn, tx_0.id, 1))
assert not spents
def test_get_owned_ids(signed_create_tx, user_pk):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert a block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
[(block_id, tx)] = list(query.get_owned_ids(conn, user_pk))
assert block_id == block.id
assert tx == signed_create_tx.to_dict()
def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote):
from bigchaindb.common.crypto import generate_key_pair
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert a block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
# create and insert some votes
structurally_valid_vote['vote']['voting_for_block'] = block.id
conn.db.votes.insert_one(structurally_valid_vote)
# create a second vote under a different key
_, pk = generate_key_pair()
structurally_valid_vote['vote']['voting_for_block'] = block.id
structurally_valid_vote['node_pubkey'] = pk
structurally_valid_vote.pop('_id')
conn.db.votes.insert_one(structurally_valid_vote)
votes = list(query.get_votes_by_block_id(conn, block.id))
assert len(votes) == 2
assert votes[0]['vote']['voting_for_block'] == block.id
assert votes[1]['vote']['voting_for_block'] == block.id
def test_get_votes_by_block_id_and_voter(signed_create_tx,
structurally_valid_vote):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert a block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
# create and insert some votes
structurally_valid_vote['vote']['voting_for_block'] = block.id
structurally_valid_vote['node_pubkey'] = 'aaa'
conn.db.votes.insert_one(structurally_valid_vote)
structurally_valid_vote['vote']['voting_for_block'] = block.id
structurally_valid_vote['node_pubkey'] = 'bbb'
structurally_valid_vote.pop('_id')
conn.db.votes.insert_one(structurally_valid_vote)
votes = list(query.get_votes_by_block_id_and_voter(conn, block.id, 'aaa'))
assert len(votes) == 1
assert votes[0]['node_pubkey'] == 'aaa'
def test_write_block(signed_create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and write block
block = Block(transactions=[signed_create_tx])
query.write_block(conn, block.to_dict())
block_db = conn.db.bigchain.find_one({'id': block.id}, {'_id': False})
assert block_db == block.to_dict()
def test_get_block(signed_create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
# create and insert block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
block_db = query.get_block(conn, block.id)
assert block_db == block.to_dict()
def test_count_blocks(signed_create_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
conn = connect()
assert query.count_blocks(conn) == 0
# create and insert some blocks
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
assert query.count_blocks(conn) == 1
def test_count_backlog(signed_create_tx, signed_transfer_tx):
from bigchaindb.backend import connect, query
conn = connect()
# create and insert some transactions
conn.db.backlog.insert_one(signed_create_tx.to_dict())
conn.db.backlog.insert_one(signed_transfer_tx.to_dict())
assert query.count_backlog(conn) == 2
def test_write_vote(structurally_valid_vote):
from bigchaindb.backend import connect, query
conn = connect()
# write a vote
query.write_vote(conn, structurally_valid_vote)
# retrieve the vote
vote_db = conn.db.votes.find_one(
{'node_pubkey': structurally_valid_vote['node_pubkey']},
{'_id': False}
)
assert vote_db == structurally_valid_vote
def test_duplicate_vote_raises_duplicate_key(structurally_valid_vote):
from bigchaindb.backend import connect, query
from bigchaindb.backend.exceptions import DuplicateKeyError
conn = connect()
# write a vote
query.write_vote(conn, structurally_valid_vote)
# write the same vote a second time
with pytest.raises(DuplicateKeyError):
query.write_vote(conn, structurally_valid_vote)
def test_get_genesis_block(genesis_block):
from bigchaindb.backend import connect, query
conn = connect()
assets, genesis_block_dict = genesis_block.decouple_assets()
metadata, genesis_block_dict = genesis_block.decouple_metadata(genesis_block_dict)
assert query.get_genesis_block(conn) == genesis_block_dict
def test_get_last_voted_block_id(genesis_block, signed_create_tx, b):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block
from bigchaindb.common.exceptions import CyclicBlockchainError
conn = connect()
# check that the last voted block is the genesis block
assert query.get_last_voted_block_id(conn, b.me) == genesis_block.id
# create and insert a new vote and block
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
vote = b.vote(block.id, genesis_block.id, True)
conn.db.votes.insert_one(vote)
assert query.get_last_voted_block_id(conn, b.me) == block.id
# force a bad chain
vote.pop('_id')
vote['vote']['voting_for_block'] = genesis_block.id
vote['vote']['previous_block'] = block.id
conn.db.votes.insert_one(vote)
with pytest.raises(CyclicBlockchainError):
query.get_last_voted_block_id(conn, b.me)
def test_get_txids_filtered(signed_create_tx, signed_transfer_tx):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block, Transaction
conn = connect()
# create and insert two blocks, one for the create and one for the
# transfer transaction
block = Block(transactions=[signed_create_tx])
conn.db.bigchain.insert_one(block.to_dict())
block = Block(transactions=[signed_transfer_tx])
conn.db.bigchain.insert_one(block.to_dict())
asset_id = Transaction.get_asset_id([signed_create_tx, signed_transfer_tx])
# Test get by just asset id
txids = set(query.get_txids_filtered(conn, asset_id))
assert txids == {signed_create_tx.id, signed_transfer_tx.id}
# Test get by asset and CREATE
txids = set(query.get_txids_filtered(conn, asset_id, Transaction.CREATE))
assert txids == {signed_create_tx.id}
# Test get by asset and TRANSFER
txids = set(query.get_txids_filtered(conn, asset_id, Transaction.TRANSFER))
assert txids == {signed_transfer_tx.id}
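For orientation, a sketch of the match criteria presumably behind get_txids_filtered in this pre-Tendermint backend: a CREATE is reachable through its own id, TRANSFERs through their asset id, optionally narrowed by operation. The exact field paths are assumptions based on the block layout used in these tests.

def txids_filtered_match(asset_id, operation=None):
    match = {'$or': [{'block.transactions.id': asset_id},
                     {'block.transactions.asset.id': asset_id}]}
    if operation:
        match['block.transactions.operation'] = operation
    return match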
@mock.patch('bigchaindb.backend.mongodb.changefeed._FEED_STOP', True)
def test_get_new_blocks_feed(b, create_tx):
from bigchaindb.backend import query
from bigchaindb.models import Block
import random
def create_block():
ts = str(random.random())
block = Block(transactions=[create_tx], timestamp=ts)
b.write_block(block)
block_dict = block.decouple_assets()[1]
return block.decouple_metadata(block_dict)[1]
create_block()
b1 = create_block()
b2 = create_block()
feed = query.get_new_blocks_feed(b.connection, b1['id'])
assert feed.__next__() == b2
b3 = create_block()
assert list(feed) == [b3]
def test_get_spending_transactions(user_pk, user_sk):
from bigchaindb.backend import connect, query
from bigchaindb.models import Block, Transaction
conn = connect()
out = [([user_pk], 1)]
tx1 = Transaction.create([user_pk], out * 3)
tx1.sign([user_sk])
inputs = tx1.to_inputs()
tx2 = Transaction.transfer([inputs[0]], out, tx1.id)
tx2.sign([user_sk])
tx3 = Transaction.transfer([inputs[1]], out, tx1.id)
tx3.sign([user_sk])
tx4 = Transaction.transfer([inputs[2]], out, tx1.id)
tx4.sign([user_sk])
block = Block([tx1, tx2, tx3, tx4])
conn.db.bigchain.insert_one(block.to_dict())
links = [inputs[0].fulfills.to_dict(), inputs[2].fulfills.to_dict()]
res = list(query.get_spending_transactions(conn, links))
# tx3 is not in the result because input 1 was not asked for
assert res == [(block.id, tx2.to_dict()), (block.id, tx4.to_dict())]
def test_get_votes_for_blocks_by_voter():
from bigchaindb.backend import connect, query
conn = connect()
votes = [
{
'node_pubkey': 'a',
'vote': {'voting_for_block': 'block1'},
},
{
'node_pubkey': 'b',
'vote': {'voting_for_block': 'block1'},
},
{
'node_pubkey': 'a',
'vote': {'voting_for_block': 'block2'},
},
{
'node_pubkey': 'a',
'vote': {'voting_for_block': 'block3'},
}
]
for vote in votes:
conn.db.votes.insert_one(vote.copy())
res = query.get_votes_for_blocks_by_voter(conn, ['block1', 'block2'], 'a')
assert list(res) == [votes[0], votes[2]]
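A small sketch of the selector presumably used by get_votes_for_blocks_by_voter, following the vote document layout shown above:

def votes_by_voter_selector(block_ids, node_pubkey):
    return {'vote.voting_for_block': {'$in': list(block_ids)},
            'node_pubkey': node_pubkey}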
def test_write_assets():
from bigchaindb.backend import connect, query
conn = connect()
assets = [
{'id': 1, 'data': '1'},
{'id': 2, 'data': '2'},
{'id': 3, 'data': '3'},
# Duplicated id. Should not be written to the database
{'id': 1, 'data': '1'},
]
# write the assets
query.write_assets(conn, deepcopy(assets))
# check that 3 assets were written to the database
cursor = conn.db.assets.find({}, projection={'_id': False})\
.sort('id', pymongo.ASCENDING)
assert cursor.count() == 3
assert list(cursor) == assets[:-1]
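The duplicate-tolerant behaviour checked here is what an unordered insert gives you, assuming a unique index on 'id': every non-duplicate document is written and the clashes surface in a single BulkWriteError, which the backend presumably swallows. A sketch:

from pymongo.errors import BulkWriteError

def write_assets_sketch(conn, assets):
    try:
        # ordered=False keeps writing past duplicate-key failures
        conn.db.assets.insert_many(assets, ordered=False)
    except BulkWriteError:
        pass  # duplicated ids are skipped, the rest are persisted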
def test_get_assets():
from bigchaindb.backend import connect, query
conn = connect()
assets = [
{'id': 1, 'data': '1'},
{'id': 2, 'data': '2'},
{'id': 3, 'data': '3'},
]
# write the assets
conn.db.assets.insert_many(deepcopy(assets), ordered=False)
# read only 2 assets
cursor = query.get_assets(conn, [1, 3])
assert cursor.count() == 2
assert list(cursor.sort('id', pymongo.ASCENDING)) == assets[::2]
@pytest.mark.parametrize('table', ['assets', 'metadata'])
def test_text_search(table):
from bigchaindb.backend import connect, query
conn = connect()
# Example data and test cases taken from the MongoDB documentation
# https://docs.mongodb.com/manual/reference/operator/query/text/
objects = [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
{'id': 3, 'subject': 'Baking a cake', 'author': 'abc', 'views': 90},
{'id': 4, 'subject': 'baking', 'author': 'xyz', 'views': 100},
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# insert the example objects
conn.db[table].insert_many(deepcopy(objects), ordered=False)
# test search single word
assert list(query.text_search(conn, 'coffee', table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
]
# match any of the search terms
assert list(query.text_search(conn, 'bake coffee cake', table=table)) == [
{'author': 'abc', 'id': 3, 'subject': 'Baking a cake', 'views': 90},
{'author': 'xyz', 'id': 1, 'subject': 'coffee', 'views': 50},
{'author': 'xyz', 'id': 4, 'subject': 'baking', 'views': 100},
{'author': 'efg', 'id': 2, 'subject': 'Coffee Shopping', 'views': 5},
{'author': 'efg', 'id': 7, 'subject': 'coffee and cream', 'views': 10}
]
# search for a phrase
assert list(query.text_search(conn, '\"coffee shop\"', table=table)) == [
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
# exclude documents that contain a term
assert list(query.text_search(conn, 'coffee -shop', table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10},
]
# search different language
assert list(query.text_search(conn, 'leche', language='es', table=table)) == [
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# case and diacritic insensitive search
assert list(query.text_search(conn, 'сы́рники CAFÉS', table=table)) == [
{'id': 6, 'subject': 'Сырники', 'author': 'jkl', 'views': 80},
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
{'id': 8, 'subject': 'Cafe con Leche', 'author': 'xyz', 'views': 10}
]
# case sensitive search
assert list(query.text_search(conn, 'Coffee', case_sensitive=True, table=table)) == [
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
# diacritic sensitive search
assert list(query.text_search(conn, 'CAFÉ', diacritic_sensitive=True, table=table)) == [
{'id': 5, 'subject': 'Café Con Leche', 'author': 'abc', 'views': 200},
]
# return text score
assert list(query.text_search(conn, 'coffee', text_score=True, table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50, 'score': 1.0},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5, 'score': 0.75},
{'id': 7, 'subject': 'coffee and cream', 'author': 'efg', 'views': 10, 'score': 0.75},
]
# limit search result
assert list(query.text_search(conn, 'coffee', limit=2, table=table)) == [
{'id': 1, 'subject': 'coffee', 'author': 'xyz', 'views': 50},
{'id': 2, 'subject': 'Coffee Shopping', 'author': 'efg', 'views': 5},
]
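All of the options exercised above map onto standard fields of MongoDB's $text operator. A sketch of how query.text_search presumably assembles the query and projection; the function name and return shape are illustrative:

def build_text_search(search, *, language=None, case_sensitive=False,
                      diacritic_sensitive=False, text_score=False, limit=0):
    text = {'$search': search,
            '$caseSensitive': case_sensitive,
            '$diacriticSensitive': diacritic_sensitive}
    if language is not None:
        text['$language'] = language
    projection = {'_id': False}
    if text_score:
        # ask the server to project the relevance score per document
        projection['score'] = {'$meta': 'textScore'}
    return {'$text': text}, projection, limit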

View File

@ -1,112 +0,0 @@
import pytest
pytestmark = pytest.mark.bdb
def test_init_creates_db_tables_and_indexes():
import bigchaindb
from bigchaindb import backend
from bigchaindb.backend.schema import init_database
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# the db is set up by the fixture so we need to remove it
conn.conn.drop_database(dbname)
init_database()
collection_names = conn.conn[dbname].collection_names()
assert sorted(collection_names) == ['assets', 'backlog', 'bigchain',
'metadata', 'votes']
indexes = conn.conn[dbname]['bigchain'].index_information().keys()
assert sorted(indexes) == ['_id_', 'asset_id', 'block_id', 'block_timestamp',
'inputs', 'outputs', 'transaction_id']
indexes = conn.conn[dbname]['backlog'].index_information().keys()
assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp',
'transaction_id']
indexes = conn.conn[dbname]['votes'].index_information().keys()
assert sorted(indexes) == ['_id_', 'block_and_voter']
indexes = conn.conn[dbname]['assets'].index_information().keys()
assert sorted(indexes) == ['_id_', 'asset_id', 'text']
indexes = conn.conn[dbname]['metadata'].index_information().keys()
assert sorted(indexes) == ['_id_', 'text', 'transaction_id']
def test_init_database_fails_if_db_exists():
import bigchaindb
from bigchaindb import backend
from bigchaindb.backend.schema import init_database
from bigchaindb.common import exceptions
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by the fixtures
assert dbname in conn.conn.database_names()
with pytest.raises(exceptions.DatabaseAlreadyExists):
init_database()
def test_create_tables():
import bigchaindb
from bigchaindb import backend
from bigchaindb.backend import schema
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by the fixtures so we need to remove it
conn.conn.drop_database(dbname)
schema.create_database(conn, dbname)
schema.create_tables(conn, dbname)
collection_names = conn.conn[dbname].collection_names()
assert sorted(collection_names) == ['assets', 'backlog', 'bigchain',
'metadata', 'votes']
def test_create_secondary_indexes():
import bigchaindb
from bigchaindb import backend
from bigchaindb.backend import schema
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by the fixtures so we need to remove it
conn.conn.drop_database(dbname)
schema.create_database(conn, dbname)
schema.create_tables(conn, dbname)
schema.create_indexes(conn, dbname)
# Bigchain table
indexes = conn.conn[dbname]['bigchain'].index_information().keys()
assert sorted(indexes) == ['_id_', 'asset_id', 'block_id', 'block_timestamp',
'inputs', 'outputs', 'transaction_id']
# Backlog table
indexes = conn.conn[dbname]['backlog'].index_information().keys()
assert sorted(indexes) == ['_id_', 'assignee__transaction_timestamp',
'transaction_id']
# Votes table
indexes = conn.conn[dbname]['votes'].index_information().keys()
assert sorted(indexes) == ['_id_', 'block_and_voter']
def test_drop(dummy_db):
from bigchaindb import backend
from bigchaindb.backend import schema
conn = backend.connect()
assert dummy_db in conn.conn.database_names()
schema.drop_database(conn, dummy_db)
assert dummy_db not in conn.conn.database_names()

View File

@ -1,194 +0,0 @@
"""Tests for the :mod:`bigchaindb.backend.rethinkdb.admin` module."""
import pytest
import rethinkdb as r
def _count_rethinkdb_servers():
from bigchaindb import config
conn = r.connect(host=config['database']['host'],
port=config['database']['port'])
return len(list(r.db('rethinkdb').table('server_status').run(conn)))
@pytest.fixture
def rdb_conn(db_host, db_port, db_name):
return r.connect(host=db_host, port=db_port, db=db_name)
@pytest.mark.bdb
def test_set_shards(rdb_conn, db_name, db_conn):
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.rethinkdb.admin import set_shards
for table in TABLES:
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards']) == 1
what_happened = set_shards(db_conn, shards=2)
for table in TABLES:
assert what_happened[table]['reconfigured'] == 1
config_changes = what_happened[table]['config_changes']
assert len(config_changes) == 1
assert len(config_changes[0]['new_val']['shards']) == 2
assert len(what_happened[table]['status_changes']) == 1
status_change = what_happened[table]['status_changes'][0]
assert not status_change['new_val']['status']['all_replicas_ready']
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards']) == 2
@pytest.mark.bdb
def test_set_shards_dry_run(rdb_conn, db_name, db_conn):
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.rethinkdb.admin import set_shards
for table in TABLES:
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards']) == 1
what_happened = set_shards(db_conn, shards=2, dry_run=True)
for table in TABLES:
assert what_happened[table]['reconfigured'] == 0
config_changes = what_happened[table]['config_changes']
assert len(config_changes) == 1
assert len(config_changes[0]['new_val']['shards']) == 2
assert 'status_change' not in what_happened[table]
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards']) == 1
@pytest.mark.bdb
@pytest.mark.skipif(
_count_rethinkdb_servers() < 2,
reason=('Requires at least two servers. It\'s impossible to have '
'more replicas of the data than there are servers.')
)
def test_set_replicas(rdb_conn, db_name, db_conn):
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.rethinkdb.admin import set_replicas
for table in TABLES:
table_config = r.db(db_name).table(table).config().run(rdb_conn)
replicas_before = table_config['shards'][0]['replicas']
assert len(replicas_before) == 1
what_happened = set_replicas(db_conn, replicas=2)
for table in TABLES:
assert what_happened[table]['reconfigured'] == 1
config_changes = what_happened[table]['config_changes']
assert len(config_changes) == 1
assert len(config_changes[0]['new_val']['shards'][0]['replicas']) == 2
assert len(what_happened[table]['status_changes']) == 1
status_change = what_happened[table]['status_changes'][0]
assert not status_change['new_val']['status']['all_replicas_ready']
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards'][0]['replicas']) == 2
assert (table_config['shards'][0]['replicas'][0] !=
table_config['shards'][0]['replicas'][1])
@pytest.mark.bdb
@pytest.mark.skipif(
_count_rethinkdb_servers() < 2,
reason=('Requires at least two servers. It\'s impossible to have '
'more replicas of the data than there are servers.')
)
def test_set_replicas_dry_run(rdb_conn, db_name, db_conn):
from bigchaindb.backend.schema import TABLES
from bigchaindb.backend.rethinkdb.admin import set_replicas
for table in TABLES:
table_config = r.db(db_name).table(table).config().run(rdb_conn)
replicas_before = table_config['shards'][0]['replicas']
assert len(replicas_before) == 1
what_happened = set_replicas(db_conn, replicas=2, dry_run=True)
for table in TABLES:
assert what_happened[table]['reconfigured'] == 0
config_changes = what_happened[table]['config_changes']
assert len(config_changes) == 1
assert len(config_changes[0]['new_val']['shards'][0]['replicas']) == 2
assert 'status_change' not in what_happened[table]
table_config = r.db(db_name).table(table).config().run(rdb_conn)
assert len(table_config['shards'][0]['replicas']) == 1
@pytest.mark.bdb
@pytest.mark.skipif(
_count_rethinkdb_servers() < 2,
reason=('Requires at least two servers. It\'s impossible to have '
'more replicas of the data than there are servers.')
)
def test_reconfigure(rdb_conn, db_name, db_conn):
from bigchaindb.backend.rethinkdb.admin import reconfigure
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
replicas_before = table_config['shards'][0]['replicas']
assert len(replicas_before) == 1
reconfigure(db_conn, table='backlog', shards=2, replicas=2)
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
assert len(table_config['shards'][0]['replicas']) == 2
assert (table_config['shards'][0]['replicas'][0] !=
table_config['shards'][0]['replicas'][1])
@pytest.mark.bdb
def test_reconfigure_shards_for_real(rdb_conn, db_name, db_conn):
from bigchaindb.backend.rethinkdb.admin import reconfigure
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
replicas_before = table_config['shards'][0]['replicas']
assert len(replicas_before) == 1
assert len(table_config['shards']) == 1
what_happened = reconfigure(
db_conn,
table='backlog',
shards=2,
replicas={'default': 1},
primary_replica_tag='default',
nonvoting_replica_tags=('default',),
)
assert what_happened['reconfigured'] == 1
assert len(what_happened['config_changes']) == 1
assert len(what_happened['config_changes'][0]['new_val']['shards']) == 2
assert len(what_happened['status_changes']) == 1
status_change = what_happened['status_changes'][0]
assert not status_change['new_val']['status']['all_replicas_ready']
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
assert len(table_config['shards']) == 2
@pytest.mark.bdb
def test_reconfigure_shards_dry_run(rdb_conn, db_name, db_conn):
from bigchaindb.backend.rethinkdb.admin import reconfigure
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
replicas_before = table_config['shards'][0]['replicas']
assert len(replicas_before) == 1
assert len(table_config['shards']) == 1
what_happened = reconfigure(
db_conn,
table='backlog',
shards=2,
replicas={'default': 1},
primary_replica_tag='default',
nonvoting_replica_tags=('default',),
dry_run=True,
)
assert what_happened['reconfigured'] == 0
assert len(what_happened['config_changes']) == 1
assert len(what_happened['config_changes'][0]['new_val']['shards']) == 2
table_config = r.db(db_name).table('backlog').config().run(rdb_conn)
assert len(table_config['shards']) == 1
@pytest.mark.bdb
def test_reconfigure_replicas_without_nonvoting_replica_tags(rdb_conn,
db_name,
db_conn):
from bigchaindb.backend.rethinkdb.admin import reconfigure
from bigchaindb.backend.exceptions import OperationError
with pytest.raises(OperationError) as exc:
reconfigure(db_conn, table='backlog', shards=1,
replicas={'default': 1}, primary_replica_tag='default')
assert isinstance(exc.value.__cause__, r.ReqlQueryLogicError)
@pytest.mark.bdb
def test_reconfigure_too_many_replicas(rdb_conn, db_name, db_conn):
from bigchaindb.backend.rethinkdb.admin import reconfigure
from bigchaindb.backend.exceptions import OperationError
replicas = _count_rethinkdb_servers() + 1
with pytest.raises(OperationError) as exc:
reconfigure(db_conn, table='backlog', shards=1, replicas=replicas)
assert isinstance(exc.value.__cause__, r.ReqlOpFailedError)

View File

@ -1,95 +0,0 @@
import pytest
from unittest.mock import Mock
from multipipes import Pipe
@pytest.fixture
def mock_changefeed_data():
return [
{
'new_val': 'seems like we have an insert here',
'old_val': None,
}, {
'new_val': None,
'old_val': 'seems like we have a delete here',
}, {
'new_val': 'seems like we have an update here',
'old_val': 'seems like we have an update here',
}
]
@pytest.fixture
def mock_changefeed_connection(mock_changefeed_data):
import bigchaindb
from bigchaindb.backend import connect
connection = connect(**bigchaindb.config['database'])
connection.run = Mock(return_value=mock_changefeed_data)
return connection
def test_changefeed_insert(mock_changefeed_connection):
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed
outpipe = Pipe()
changefeed = get_changefeed(mock_changefeed_connection,
'backlog', ChangeFeed.INSERT)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have an insert here'
assert outpipe.qsize() == 0
def test_changefeed_delete(mock_changefeed_connection):
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed
outpipe = Pipe()
changefeed = get_changefeed(mock_changefeed_connection,
'backlog', ChangeFeed.DELETE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have a delete here'
assert outpipe.qsize() == 0
def test_changefeed_update(mock_changefeed_connection):
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed
outpipe = Pipe()
changefeed = get_changefeed(mock_changefeed_connection,
'backlog', ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have an update here'
assert outpipe.qsize() == 0
def test_changefeed_multiple_operations(mock_changefeed_connection):
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed
outpipe = Pipe()
changefeed = get_changefeed(mock_changefeed_connection, 'backlog',
ChangeFeed.INSERT | ChangeFeed.UPDATE)
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.get() == 'seems like we have an insert here'
assert outpipe.get() == 'seems like we have an update here'
assert outpipe.qsize() == 0
def test_changefeed_prefeed(mock_changefeed_connection):
from bigchaindb.backend import get_changefeed
from bigchaindb.backend.changefeed import ChangeFeed
outpipe = Pipe()
changefeed = get_changefeed(mock_changefeed_connection, 'backlog',
ChangeFeed.INSERT, prefeed=[1, 2, 3])
changefeed.outqueue = outpipe
changefeed.run_forever()
assert outpipe.qsize() == 4

View File

@ -1,164 +0,0 @@
import time
import multiprocessing as mp
from threading import Thread
from unittest.mock import patch
import pytest
import rethinkdb as r
def test_get_connection_returns_the_correct_instance():
from bigchaindb.backend import connect
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
config = {
'backend': 'rethinkdb',
'host': 'localhost',
'port': 28015,
'name': 'test'
}
conn = connect(**config)
assert isinstance(conn, Connection)
assert isinstance(conn, RethinkDBConnection)
def test_run_a_simple_query():
from bigchaindb.backend import connect
conn = connect()
query = r.expr('1')
assert conn.run(query) == '1'
def test_raise_exception_when_max_tries():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import OperationError
class MockQuery:
def run(self, conn):
raise r.ReqlDriverError('mock')
conn = connect()
with pytest.raises(OperationError):
conn.run(MockQuery())
def test_reconnect_when_connection_lost(db_host, db_port):
from bigchaindb.backend import connect
original_connect = r.connect
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
original_connect(host=db_host, port=db_port)
]
conn = connect()
query = r.expr('1')
assert conn.run(query) == '1'
def test_reconnect_when_connection_lost_tries_n_times():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock')
]
conn = connect(max_tries=3)
query = r.expr('1')
with pytest.raises(ConnectionError):
assert conn.run(query) == '1'
def test_changefeed_reconnects_when_connection_lost(monkeypatch):
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.rethinkdb.changefeed import RethinkDBChangeFeed
class MockConnection:
tries = 0
def run(self, *args, **kwargs):
return self
def __iter__(self):
return self
def __next__(self):
self.tries += 1
if self.tries == 1:
raise r.ReqlDriverError('mock')
elif self.tries == 2:
return {'new_val': {'fact':
'A group of cats is called a clowder.'},
'old_val': None}
if self.tries == 3:
raise r.ReqlDriverError('mock')
elif self.tries == 4:
return {'new_val': {'fact': 'Cats sleep 70% of their lives.'},
'old_val': None}
else:
time.sleep(10)
changefeed = RethinkDBChangeFeed('cat_facts', ChangeFeed.INSERT,
connection=MockConnection())
changefeed.outqueue = mp.Queue()
t_changefeed = Thread(target=changefeed.run_forever, daemon=True)
t_changefeed.start()
time.sleep(1)
# try 1: MockConnection raises an error that will stop the
# ChangeFeed instance from iterating for 1 second.
# try 2: MockConnection releases a new record. The new record
# will be put in the outqueue of the ChangeFeed instance.
fact = changefeed.outqueue.get()['fact']
assert fact == 'A group of cats is called a clowder.'
# try 3: MockConnection raises an error that will stop the
# ChangeFeed instance from iterating for 1 second.
assert t_changefeed.is_alive() is True
time.sleep(2)
# try 4: MockConnection releases a new record. The new record
# will be put in the outqueue of the ChangeFeed instance.
fact = changefeed.outqueue.get()['fact']
assert fact == 'Cats sleep 70% of their lives.'
@patch('rethinkdb.connect')
def test_connection_happens_one_time_if_successful(mock_connect):
import bigchaindb
from bigchaindb.backend import connect
timeout = bigchaindb.config['database']['connection_timeout']
query = r.expr('1')
conn = connect('rethinkdb', 'localhost', 1337, 'whatev')
conn.run(query)
mock_connect.assert_called_once_with(host='localhost',
port=1337,
db='whatev',
timeout=timeout)
@patch('rethinkdb.connect', side_effect=r.ReqlTimeoutError())
def test_connection_timeout(mock_connect):
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError
query = r.expr('1')
conn = connect()
# connection should raise a ConnectionError after 3 tries
with pytest.raises(ConnectionError):
conn.run(query)
assert mock_connect.call_count == 3
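Both timeout tests above pin down the same bounded-retry contract: connecting is attempted up to max_tries times (three by default) before a backend-level ConnectionError is raised. A sketch, with the builtin ConnectionError standing in for the backend's:

def connect_with_retries(do_connect, max_tries=3):
    last_exc = None
    for _ in range(max_tries):
        try:
            return do_connect()
        except Exception as exc:  # stands in for the driver's errors
            last_exc = exc
    raise ConnectionError from last_exc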

View File

@ -1,117 +0,0 @@
import pytest
import rethinkdb as r
import bigchaindb
from bigchaindb import backend
from bigchaindb.backend.rethinkdb import schema
@pytest.mark.bdb
def test_init_creates_db_tables_and_indexes():
from bigchaindb.backend.schema import init_database
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by fixtures so we need to remove it
conn.run(r.db_drop(dbname))
init_database()
assert conn.run(r.db_list().contains(dbname)) is True
assert conn.run(r.db(dbname).table_list().contains('backlog', 'bigchain')) is True
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'block_timestamp')) is True
assert conn.run(r.db(dbname).table('backlog').index_list().contains(
'assignee__transaction_timestamp')) is True
@pytest.mark.bdb
def test_init_database_fails_if_db_exists():
from bigchaindb.backend.schema import init_database
from bigchaindb.common import exceptions
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by fixtures
assert conn.run(r.db_list().contains(dbname)) is True
with pytest.raises(exceptions.DatabaseAlreadyExists):
init_database()
def test_create_database(not_yet_created_db):
conn = backend.connect()
schema.create_database(conn, not_yet_created_db)
assert conn.run(r.db_list().contains(not_yet_created_db)) is True
@pytest.mark.bdb
def test_create_tables():
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by fixtures so we need to remove it
# and recreate it just with one table
conn.run(r.db_drop(dbname))
schema.create_database(conn, dbname)
schema.create_tables(conn, dbname)
assert conn.run(r.db(dbname).table_list().contains('bigchain')) is True
assert conn.run(r.db(dbname).table_list().contains('backlog')) is True
assert conn.run(r.db(dbname).table_list().contains('votes')) is True
assert conn.run(r.db(dbname).table_list().contains('assets')) is True
assert conn.run(r.db(dbname).table_list().contains('metadata')) is True
assert len(conn.run(r.db(dbname).table_list())) == 5
@pytest.mark.bdb
def test_create_secondary_indexes():
conn = backend.connect()
dbname = bigchaindb.config['database']['name']
# The db is set up by fixtures so we need to remove it
# and recreate it just with one table
conn.run(r.db_drop(dbname))
schema.create_database(conn, dbname)
schema.create_tables(conn, dbname)
schema.create_indexes(conn, dbname)
# Bigchain table
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'block_timestamp')) is True
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'transaction_id')) is True
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'asset_id')) is True
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'inputs')) is True
assert conn.run(r.db(dbname).table('bigchain').index_list().contains(
'outputs')) is True
# Backlog table
assert conn.run(r.db(dbname).table('backlog').index_list().contains(
'assignee__transaction_timestamp')) is True
# Votes table
assert conn.run(r.db(dbname).table('votes').index_list().contains(
'block_and_voter')) is True
def test_drop(dummy_db):
conn = backend.connect()
assert conn.run(r.db_list().contains(dummy_db)) is True
schema.drop_database(conn, dummy_db)
assert conn.run(r.db_list().contains(dummy_db)) is False
def test_drop_non_existent_db_raises_an_error(dummy_db):
from bigchaindb.common import exceptions
conn = backend.connect()
assert conn.run(r.db_list().contains(dummy_db)) is True
schema.drop_database(conn, dummy_db)
with pytest.raises(exceptions.DatabaseDoesNotExist):
schema.drop_database(conn, dummy_db)

View File

@ -1,6 +1,9 @@
import pytest
pytestmark = pytest.mark.tendermint
def test_get_connection_raises_a_configuration_error(monkeypatch):
from bigchaindb.common.exceptions import ConfigurationError
from bigchaindb.backend import connect

View File

@ -1,6 +1,11 @@
import pytest
from pytest import mark, raises
pytestmark = pytest.mark.tendermint
@mark.parametrize('schema_func_name,args_qty', (
('create_database', 1),
('create_tables', 1),
@ -15,32 +20,15 @@ def test_schema(schema_func_name, args_qty):
@mark.parametrize('query_func_name,args_qty', (
('write_transaction', 1),
('count_blocks', 0),
('count_backlog', 0),
('get_genesis_block', 0),
('delete_transaction', 1),
('get_stale_transactions', 1),
('get_blocks_status_from_transaction', 1),
('get_transaction_from_backlog', 1),
('delete_transactions', 1),
('get_txids_filtered', 1),
('get_asset_by_id', 1),
('get_owned_ids', 1),
('get_votes_by_block_id', 1),
('write_block', 1),
('get_block', 1),
('write_vote', 1),
('get_last_voted_block_id', 1),
('get_spent', 2),
('get_votes_by_block_id_and_voter', 2),
('update_transaction', 2),
('get_transaction_from_block', 2),
('get_new_blocks_feed', 1),
('get_votes_for_blocks_by_voter', 2),
('get_spending_transactions', 1),
('write_assets', 1),
('get_assets', 1),
('write_metadata', 1),
('store_assets', 1),
('get_asset', 1),
('store_metadatas', 1),
('get_metadata', 1),
))
def test_query(query_func_name, args_qty):
@ -48,39 +36,3 @@ def test_query(query_func_name, args_qty):
query_func = getattr(query, query_func_name)
with raises(NotImplementedError):
query_func(None, *range(args_qty))
@mark.parametrize('changefeed_func_name,args_qty', (
('get_changefeed', 2),
))
def test_changefeed(changefeed_func_name, args_qty):
from bigchaindb.backend import changefeed
changefeed_func = getattr(changefeed, changefeed_func_name)
with raises(NotImplementedError):
changefeed_func(None, *range(args_qty))
@mark.parametrize('changefeed_class_func_name,args_qty', (
('run_forever', 0),
('run_changefeed', 0),
))
def test_changefeed_class(changefeed_class_func_name, args_qty):
from bigchaindb.backend.changefeed import ChangeFeed
changefeed_class_func = getattr(ChangeFeed, changefeed_class_func_name)
with raises(NotImplementedError):
changefeed_class_func(None, *range(args_qty))
@mark.parametrize('admin_func_name,kwargs', (
('get_config', {'table': None}),
('reconfigure', {'table': None, 'shards': None, 'replicas': None}),
('set_shards', {'shards': None}),
('set_replicas', {'replicas': None}),
('add_replicas', {'replicas': None}),
('remove_replicas', {'replicas': None}),
))
def test_admin(admin_func_name, kwargs):
from bigchaindb.backend import admin
admin_func = getattr(admin, admin_func_name)
with raises(NotImplementedError):
admin_func(None, **kwargs)

View File

@ -4,6 +4,9 @@ from types import ModuleType
import pytest
pytestmark = pytest.mark.tendermint
@pytest.fixture
def mock_module():
return ModuleType('mock_module')

View File

@ -16,6 +16,7 @@ import pytest
from pymongo import MongoClient
from bigchaindb.common import crypto
from bigchaindb.tendermint.lib import Block
TEST_DB_NAME = 'bigchain_test'
@ -36,12 +37,11 @@ def pytest_runtest_setup(item):
def pytest_addoption(parser):
from bigchaindb.backend.connection import BACKENDS
BACKENDS['mongodb-ssl'] = 'bigchaindb.backend.mongodb.connection.MongoDBConnection'
backends = ', '.join(BACKENDS.keys())
parser.addoption(
'--database-backend',
action='store',
default=os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'rethinkdb'),
default=os.environ.get('BIGCHAINDB_DATABASE_BACKEND', 'localmongodb'),
help='Defines the backend to use (available: {})'.format(backends),
)
@ -50,7 +50,6 @@ def pytest_ignore_collect(path, config):
from bigchaindb.backend.connection import BACKENDS
path = str(path)
BACKENDS['mongodb-ssl'] = 'bigchaindb.backend.mongodb.connection.MongoDBConnection'
supported_backends = BACKENDS.keys()
if os.path.isdir(path):
@ -63,20 +62,19 @@ def pytest_ignore_collect(path, config):
def pytest_configure(config):
config.addinivalue_line(
'markers',
'bdb(): Mark the test as needing BigchainDB, i.e. a database with '
'the three tables: "backlog", "bigchain", "votes". BigchainDB will '
'be configured such that the database and tables are available for an '
'entire test session. For distributed tests, the database name will '
'be suffixed with the process identifier, e.g.: "bigchain_test_gw0", '
'to ensure that each process session has its own separate database.'
'bdb(): Mark the test as needing BigchainDB. '
'BigchainDB will be configured such that the database and tables are available for an '
'entire test session. '
'You need to run a backend (e.g. MongoDB) '
'prior to running tests with this marker. You should not need to restart the backend '
'in between test runs since the test infrastructure flushes the backend upon session end.'
)
config.addinivalue_line(
'markers',
'genesis(): Mark the test as needing a genesis block in place. The '
'prerequisite steps of configuration and database setup are taken '
'care of at session scope (if needed), prior to creating the genesis '
'block. The genesis block has function scope: it is destroyed after '
'each test function/method.'
'abci(): Mark the test as needing a running ABCI server in place. Use this marker '
'for tests that require a running Tendermint instance. Note that the test infrastructure '
'has no way to reset Tendermint data upon session end - you need to do it manually. '
'Setup performed by this marker includes the steps performed by the bdb marker.'
)
@ -86,12 +84,6 @@ def _bdb_marker(request):
request.getfixturevalue('_bdb')
@pytest.fixture(autouse=True)
def _genesis_marker(request):
if request.keywords.get('genesis', None):
request.getfixturevalue('_genesis')
@pytest.fixture(autouse=True)
def _restore_config(_configure_bigchaindb):
from bigchaindb import config, config_utils
@ -100,29 +92,8 @@ def _restore_config(_configure_bigchaindb):
config_utils.set_config(config_before_test)
@pytest.fixture
def _restore_dbs(request):
from bigchaindb.backend import connect, schema
from bigchaindb.common.exceptions import DatabaseDoesNotExist
from .utils import list_dbs
conn = connect()
dbs_before_test = list_dbs(conn)
yield
dbs_after_test = list_dbs(conn)
dbs_to_delete = (
db for db in set(dbs_after_test) - set(dbs_before_test)
if TEST_DB_NAME not in db
)
print(dbs_to_delete)
for db in dbs_to_delete:
try:
schema.drop_database(conn, db)
except DatabaseDoesNotExist:
pass
@pytest.fixture(scope='session')
def _configure_bigchaindb(request, ssl_context):
def _configure_bigchaindb(request):
import bigchaindb
from bigchaindb import config_utils
test_db_name = TEST_DB_NAME
@ -133,21 +104,6 @@ def _configure_bigchaindb(request, ssl_context):
backend = request.config.getoption('--database-backend')
if backend == 'mongodb-ssl':
bigchaindb._database_map[backend] = {
# we use mongodb as the backend for mongodb-ssl
'backend': 'mongodb',
'connection_timeout': 5000,
'max_tries': 3,
'ssl': True,
'ca_cert': ssl_context.ca,
'crlfile': ssl_context.crl,
'certfile': ssl_context.cert,
'keyfile': ssl_context.key,
'keyfile_passphrase': os.environ.get('BIGCHAINDB_DATABASE_KEYFILE_PASSPHRASE', None)
}
bigchaindb._database_map[backend].update(bigchaindb._base_database_mongodb)
config = {
'database': bigchaindb._database_map[backend],
'keypair': {
@ -193,32 +149,11 @@ def _setup_database(_configure_bigchaindb):
def _bdb(_setup_database, _configure_bigchaindb):
from bigchaindb import config
from bigchaindb.backend import connect
from bigchaindb.backend.admin import get_config
from bigchaindb.backend.schema import TABLES
from .utils import flush_db, update_table_config
from .utils import flush_db
conn = connect()
# TODO remove condition once the mongodb implementation is done
if config['database']['backend'] == 'rethinkdb':
table_configs_before = {
t: get_config(conn, table=t) for t in TABLES
}
yield
dbname = config['database']['name']
flush_db(conn, dbname)
# TODO remove condition once the mongodb implementation is done
if config['database']['backend'] == 'rethinkdb':
for t, c in table_configs_before.items():
update_table_config(conn, t, **c)
@pytest.fixture
def _genesis(_bdb, genesis_block):
# TODO for precision's sake, delete the block once the test is done. The
# deletion is done indirectly via the teardown code of _bdb but explicit
# deletion of the block would make things clearer. E.g.:
# yield
# tests.utils.delete_genesis_block(conn, dbname)
pass
# We need this function to avoid loading an existing
@ -391,57 +326,45 @@ def structurally_valid_vote():
}
@pytest.fixture
def genesis_block(b):
return b.create_genesis_block()
def _get_height(b):
maybe_block = b.get_latest_block()
return 0 if maybe_block is None else maybe_block['height']
@pytest.fixture
def inputs(user_pk, b, genesis_block):
def inputs(user_pk, b):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
prev_block_id = genesis_block.id
for block in range(4):
transactions = [
Transaction.create(
[b.me],
[([user_pk], 1)],
metadata={'msg': random.random()},
).sign([b.me_private])
).sign([b.me_private]).to_dict()
for _ in range(10)
]
block = b.create_block(transactions)
b.write_block(block)
# vote the blocks valid, so that the inputs are valid
vote = b.vote(block.id, prev_block_id, True)
prev_block_id = block.id
b.write_vote(vote)
block = Block(app_hash='', height=_get_height(b), transactions=transactions)
b.store_block(block._asdict())
@pytest.fixture
def inputs_shared(user_pk, user2_pk, genesis_block):
def inputs_shared(user_pk, user2_pk, b):
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
prev_block_id = genesis_block.id
for block in range(4):
transactions = [
Transaction.create(
[b.me],
[user_pk, user2_pk],
metadata={'msg': random.random()},
).sign([b.me_private])
).sign([b.me_private]).to_dict()
for _ in range(10)
]
block = b.create_block(transactions)
b.write_block(block)
# vote the blocks valid, so that the inputs are valid
vote = b.vote(block.id, prev_block_id, True)
prev_block_id = block.id
b.write_vote(vote)
block = Block(app_hash='', height=_get_height(b), transactions=transactions)
b.store_block(block._asdict())
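For readability, the pattern both rewritten fixtures follow, relying on the Block named tuple imported at the top of this conftest and the _get_height helper defined above, condensed into one hypothetical helper:

def write_test_block(b, transactions):
    # wrap already signed, dict-serialised transactions in a block and store it
    block = Block(app_hash='', height=_get_height(b),
                  transactions=transactions)
    b.store_block(block._asdict())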
@pytest.fixture
@ -611,53 +534,6 @@ def abci_server():
abci_proxy.terminate()
@pytest.fixture(scope='session')
def certs_dir():
return os.path.abspath('tests/backend/mongodb-ssl/certs')
@pytest.fixture(scope='session')
def ca_chain_cert(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CA_CERT',
os.path.join(certs_dir, 'ca-chain.cert.pem'))
@pytest.fixture(scope='session')
def ssl_crl(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CRLFILE',
os.path.join(certs_dir, 'crl.pem'))
@pytest.fixture(scope='session')
def ssl_cert(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_CERTFILE',
os.path.join(certs_dir, 'bigchaindb.cert.pem'))
@pytest.fixture(scope='session')
def ssl_key(certs_dir):
return os.environ.get(
'BIGCHAINDB_DATABASE_KEYFILE',
os.path.join(certs_dir, 'bigchaindb.key.pem'))
@pytest.fixture
def mdb_ssl_pem_key(certs_dir):
return os.environ.get(
'BIGCHAINDB_MDB_PEM_KEY_TEST',
os.path.join(certs_dir, 'local-mongo.pem'))
@pytest.fixture(scope='session')
def ssl_context(ca_chain_cert, ssl_crl, ssl_cert, ssl_key):
SSLContext = namedtuple('SSLContext', ('ca', 'crl', 'cert', 'key'))
return SSLContext(
ca=ca_chain_cert, crl=ssl_crl, cert=ssl_cert, key=ssl_key)
@pytest.fixture
def wsserver_config():
from bigchaindb import config
@ -684,15 +560,6 @@ def wsserver_base_url(wsserver_scheme, wsserver_host, wsserver_port):
return '{}://{}:{}'.format(wsserver_scheme, wsserver_host, wsserver_port)
@pytest.fixture
def genesis_tx(b, user_pk):
from bigchaindb.models import Transaction
tx = Transaction.create([b.me], [([user_pk], 1)])
tx.operation = Transaction.GENESIS
genesis_tx = tx.sign([b.me_private])
return genesis_tx
@pytest.fixture
def unspent_output_0():
return {

View File

@@ -218,7 +218,7 @@ class TestBigchainApi(object):
def test_text_search(self, b):
from bigchaindb.models import Transaction
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
# define the assets
asset1 = {'msg': 'BigchainDB 1'}
@@ -246,7 +246,7 @@ class TestBigchainApi(object):
try:
assets = list(b.text_search('bigchaindb'))
except OperationError as exc:
assert not isinstance(b.connection, MongoDBConnection)
assert not isinstance(b.connection, LocalMongoDBConnection)
else:
assert len(assets) == 3
@@ -254,7 +254,7 @@ class TestBigchainApi(object):
def test_text_search_returns_valid_only(self, monkeypatch, b):
from bigchaindb.models import Transaction
from bigchaindb.backend.exceptions import OperationError
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
asset_valid = {'msg': 'Hello BigchainDB!'}
asset_invalid = {'msg': 'Goodbye BigchainDB!'}
@@ -285,7 +285,7 @@ class TestBigchainApi(object):
try:
assets = list(b.text_search('bigchaindb'))
except OperationError:
assert not isinstance(b.connection, MongoDBConnection)
assert not isinstance(b.connection, LocalMongoDBConnection)
return
# should only return one asset

View File

@@ -1,54 +1,13 @@
from functools import singledispatch
import rethinkdb as r
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
@singledispatch
def list_dbs(connection):
raise NotImplementedError
@list_dbs.register(RethinkDBConnection)
def list_rethink_dbs(connection):
return connection.run(r.db_list())
@list_dbs.register(MongoDBConnection)
def list_mongo_dbs(connection):
raise NotImplementedError
@singledispatch
def flush_db(connection, dbname):
raise NotImplementedError
@flush_db.register(RethinkDBConnection)
def flush_rethink_db(connection, dbname):
try:
connection.run(r.db(dbname).table('bigchain').delete())
connection.run(r.db(dbname).table('backlog').delete())
connection.run(r.db(dbname).table('votes').delete())
connection.run(r.db(dbname).table('assets').delete())
connection.run(r.db(dbname).table('metadata').delete())
except r.ReqlOpFailedError:
pass
@flush_db.register(MongoDBConnection)
def flush_mongo_db(connection, dbname):
connection.conn[dbname].bigchain.delete_many({})
connection.conn[dbname].backlog.delete_many({})
connection.conn[dbname].votes.delete_many({})
connection.conn[dbname].assets.delete_many({})
connection.conn[dbname].metadata.delete_many({})
connection.conn[dbname].utxos.delete_many({})
@flush_db.register(LocalMongoDBConnection)
def flush_localmongo_db(connection, dbname):
connection.conn[dbname].bigchain.delete_many({})
@@ -58,13 +17,3 @@ def flush_localmongo_db(connection, dbname):
connection.conn[dbname].metadata.delete_many({})
connection.conn[dbname].utxos.delete_many({})
connection.conn[dbname].validators.delete_many({})
@singledispatch
def update_table_config(connection, table, **kwargs):
raise NotImplementedError
@update_table_config.register(RethinkDBConnection)
def update_table_config(connection, table, **kwargs):
return connection.run(r.table(table).config().update(dict(**kwargs)))
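The helpers in this file are built on `functools.singledispatch`: the undecorated generic raises `NotImplementedError`, and each backend registers an implementation keyed on the type of the first argument (the connection). A self-contained toy showing the mechanism (the connection class below is a stand-in, not the real `LocalMongoDBConnection`):
from functools import singledispatch
@singledispatch
def flush_db(connection, dbname):
    # Default: no implementation registered for this connection type.
    raise NotImplementedError
class FakeLocalMongoConnection:
    """Stand-in for LocalMongoDBConnection, for illustration only."""
@flush_db.register(FakeLocalMongoConnection)
def _flush_local_mongo(connection, dbname):
    return 'flushed {}'.format(dbname)
assert flush_db(FakeLocalMongoConnection(), 'bigchain') == 'flushed bigchain'
# flush_db(object(), 'bigchain') would raise NotImplementedError.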

View File

@@ -20,9 +20,9 @@ def test_get_assets_with_missing_text_search(client):
@pytest.mark.genesis
def test_get_assets(client, b):
from bigchaindb.models import Transaction
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
if isinstance(b.connection, MongoDBConnection):
if isinstance(b.connection, LocalMongoDBConnection):
# test returns empty list when no assets are found
res = client.get(ASSETS_ENDPOINT + '?search=abc')
assert res.json == []
@@ -57,9 +57,9 @@ def test_get_assets(client, b):
@pytest.mark.genesis
def test_get_assets_limit(client, b):
from bigchaindb.models import Transaction
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
if isinstance(b.connection, MongoDBConnection):
if isinstance(b.connection, LocalMongoDBConnection):
# create two assets
asset1 = {'msg': 'abc 1'}
asset2 = {'msg': 'abc 2'}

View File

@@ -60,9 +60,9 @@ def test_post_create_transaction_endpoint(b, client):
def test_post_create_transaction_with_language(b, client, nested, language,
expected_status_code):
from bigchaindb.models import Transaction
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
if isinstance(b.connection, MongoDBConnection):
if isinstance(b.connection, LocalMongoDBConnection):
user_priv, user_pub = crypto.generate_key_pair()
lang_obj = {'language': language}
@@ -98,10 +98,10 @@ def test_post_create_transaction_with_language(b, client, nested, language,
def test_post_create_transaction_with_invalid_key(b, client, field, value,
err_key, expected_status_code):
from bigchaindb.models import Transaction
from bigchaindb.backend.mongodb.connection import MongoDBConnection
from bigchaindb.backend.localmongodb.connection import LocalMongoDBConnection
user_priv, user_pub = crypto.generate_key_pair()
if isinstance(b.connection, MongoDBConnection):
if isinstance(b.connection, LocalMongoDBConnection):
if field == 'asset':
tx = Transaction.create([user_pub], [([user_pub], 1)],
asset=value)