diff --git a/CHANGELOG.md b/CHANGELOG.md index 3db903e3..4bfbd8dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,20 @@ For reference, the possible headings are: * **External Contributors** to list contributors outside of BigchainDB GmbH. * **Notes** +## [0.10.2] - 2017-05-16 +Tag name: v0.10.2 + +## Added +* Add Cross Origin Resource Sharing (CORS) support for the HTTP API. + [Commit 6cb7596](https://github.com/bigchaindb/bigchaindb/commit/6cb75960b05403c77bdae0fd327612482589efcb) + +## Fixed +* Fixed `streams_v1` API link in response to `GET /api/v1`. + [Pull Request #1466](https://github.com/bigchaindb/bigchaindb/pull/1466) +* Fixed mismatch between docs and implementation for `GET /blocks?status=` + endpoint. The `status` query parameter is now case insensitive. + [Pull Request #1464](https://github.com/bigchaindb/bigchaindb/pull/1464) + ## [0.10.1] - 2017-04-19 Tag name: v0.10.1 diff --git a/bigchaindb/backend/mongodb/query.py b/bigchaindb/backend/mongodb/query.py index 39d99d4a..926bd7d5 100644 --- a/bigchaindb/backend/mongodb/query.py +++ b/bigchaindb/backend/mongodb/query.py @@ -176,6 +176,25 @@ def get_spent(conn, transaction_id, output): return (elem['block']['transactions'] for elem in cursor) +@register_query(MongoDBConnection) +def get_spending_transactions(conn, inputs): + cursor = conn.run( + conn.collection('bigchain').aggregate([ + {'$match': { + 'block.transactions.inputs.fulfills': { + '$in': inputs, + }, + }}, + {'$unwind': '$block.transactions'}, + {'$match': { + 'block.transactions.inputs.fulfills': { + '$in': inputs, + }, + }}, + ])) + return ((b['id'], b['block']['transactions']) for b in cursor) + + @register_query(MongoDBConnection) def get_owned_ids(conn, owner): cursor = conn.run( @@ -184,9 +203,7 @@ def get_owned_ids(conn, owner): {'$unwind': '$block.transactions'}, {'$match': {'block.transactions.outputs.public_keys': owner}} ])) - # we need to access some nested fields before returning so lets use a - # generator to avoid having to read all records on the cursor at this point - return (elem['block']['transactions'] for elem in cursor) + return ((b['id'], b['block']['transactions']) for b in cursor) @register_query(MongoDBConnection) @@ -197,6 +214,15 @@ def get_votes_by_block_id(conn, block_id): projection={'_id': False})) +@register_query(MongoDBConnection) +def get_votes_for_blocks_by_voter(conn, block_ids, node_pubkey): + return conn.run( + conn.collection('votes') + .find({'vote.voting_for_block': {'$in': block_ids}, + 'node_pubkey': node_pubkey}, + projection={'_id': False})) + + @register_query(MongoDBConnection) def get_votes_by_block_id_and_voter(conn, block_id, node_pubkey): return conn.run( diff --git a/bigchaindb/backend/query.py b/bigchaindb/backend/query.py index 9b2197a5..07101e47 100644 --- a/bigchaindb/backend/query.py +++ b/bigchaindb/backend/query.py @@ -140,6 +140,20 @@ def get_spent(connection, transaction_id, condition_id): raise NotImplementedError +@singledispatch +def get_spending_transactions(connection, inputs): + """Return transactions which spend given inputs + + Args: + inputs (list): list of {txid, output} + + Returns: + Iterator of (block_ids, transaction) for transactions that + spend given inputs. + """ + raise NotImplementedError + + @singledispatch def get_owned_ids(connection, owner): """Retrieve a list of `txids` that can we used has inputs. @@ -148,9 +162,9 @@ def get_owned_ids(connection, owner): owner (str): base58 encoded public key. Returns: - A cursor for the matching transactions. 
+ Iterator of (block_id, transaction) for transactions + that list given owner in conditions. """ - raise NotImplementedError @@ -183,6 +197,20 @@ def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey): raise NotImplementedError +@singledispatch +def get_votes_for_blocks_by_voter(connection, block_ids, pubkey): + """Return votes for many block_ids + + Args: + block_ids (set): block_ids + pubkey (str): public key of voting node + + Returns: + A cursor of votes matching given block_ids and public key + """ + raise NotImplementedError + + @singledispatch def write_block(connection, block): """Write a block to the bigchain table. diff --git a/bigchaindb/backend/rethinkdb/query.py b/bigchaindb/backend/rethinkdb/query.py index 417bcd93..923632fd 100644 --- a/bigchaindb/backend/rethinkdb/query.py +++ b/bigchaindb/backend/rethinkdb/query.py @@ -122,13 +122,14 @@ def get_spent(connection, transaction_id, output): @register_query(RethinkDBConnection) def get_owned_ids(connection, owner): - return connection.run( - r.table('bigchain', read_mode=READ_MODE) + query = (r.table('bigchain', read_mode=READ_MODE) .get_all(owner, index='outputs') .distinct() - .concat_map(lambda doc: doc['block']['transactions']) - .filter(lambda tx: tx['outputs'].contains( + .concat_map(unwind_block_transactions) + .filter(lambda doc: doc['tx']['outputs'].contains( lambda c: c['public_keys'].contains(owner)))) + cursor = connection.run(query) + return ((b['id'], b['tx']) for b in cursor) @register_query(RethinkDBConnection) @@ -266,3 +267,30 @@ def get_unvoted_blocks(connection, node_pubkey): # database level. Solving issue #444 can help untangling the situation unvoted_blocks = filter(lambda block: not utils.is_genesis_block(block), unvoted) return unvoted_blocks + + +@register_query(RethinkDBConnection) +def get_votes_for_blocks_by_voter(connection, block_ids, node_pubkey): + return connection.run( + r.table('votes') + .filter(lambda row: r.expr(block_ids).contains(row['vote']['voting_for_block'])) + .filter(lambda row: row['node_pubkey'] == node_pubkey)) + + +def unwind_block_transactions(block): + """ Yield a block for each transaction in given block """ + return block['block']['transactions'].map(lambda tx: block.merge({'tx': tx})) + + +@register_query(RethinkDBConnection) +def get_spending_transactions(connection, links): + query = ( + r.table('bigchain') + .get_all(*[(l['txid'], l['output']) for l in links], index='inputs') + .concat_map(unwind_block_transactions) + # filter transactions spending output + .filter(lambda doc: r.expr(links).set_intersection( + doc['tx']['inputs'].map(lambda i: i['fulfills']))) + ) + cursor = connection.run(query) + return ((b['id'], b['tx']) for b in cursor) diff --git a/bigchaindb/common/transaction.py b/bigchaindb/common/transaction.py index 6e271d60..365fee8f 100644 --- a/bigchaindb/common/transaction.py +++ b/bigchaindb/common/transaction.py @@ -161,6 +161,9 @@ class TransactionLink(object): # TODO: If `other !== TransactionLink` return `False` return self.to_dict() == other.to_dict() + def __hash__(self): + return hash((self.txid, self.output)) + @classmethod def from_dict(cls, link): """Transforms a Python dictionary to a TransactionLink object. 
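For illustration, here is a minimal Python sketch (not part of this changeset) of how the new query contract and `TransactionLink.__hash__` fit together: `get_spending_transactions` and `get_owned_ids` now yield `(block_id, transaction)` pairs, and hashable `TransactionLink` objects can be collected in a set to filter out spent outputs. The `connection` and `outputs` names are assumed inputs; the logic mirrors `FastQuery.filter_spent_outputs`, introduced later in this diff, minus the vote-based validity filtering.

```python
# Illustrative sketch only -- not part of the patch.
# Assumes an existing backend `connection` and a list of TransactionLink
# objects named `outputs` (e.g. the result of a query for a public key).
from bigchaindb.backend import query
from bigchaindb.common.transaction import TransactionLink


def naive_filter_spent_outputs(connection, outputs):
    """Return the subset of `outputs` that no transaction spends yet."""
    links = [o.to_dict() for o in outputs]  # [{'txid': ..., 'output': ...}, ...]
    spends = set()
    # Under the new contract, each result is a (block_id, transaction) pair.
    for _block_id, tx in query.get_spending_transactions(connection, links):
        for input_ in tx['inputs']:
            # TransactionLink.__hash__ (added above) makes set membership work.
            spends.add(TransactionLink.from_dict(input_['fulfills']))
    return [o for o in outputs if o not in spends]
```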
diff --git a/bigchaindb/config_utils.py b/bigchaindb/config_utils.py index 5a72a7d6..57d10f74 100644 --- a/bigchaindb/config_utils.py +++ b/bigchaindb/config_utils.py @@ -238,7 +238,10 @@ def autoconfigure(filename=None, config=None, force=False): try: newconfig = update(newconfig, file_config(filename=filename)) except FileNotFoundError as e: - logger.warning('Cannot find config file `%s`.' % e.filename) + if filename: + raise + else: + logger.info('Cannot find config file `%s`.' % e.filename) # override configuration with env variables newconfig = env_config(newconfig) diff --git a/bigchaindb/core.py b/bigchaindb/core.py index b2c8d398..39039cc0 100644 --- a/bigchaindb/core.py +++ b/bigchaindb/core.py @@ -4,11 +4,10 @@ from time import time from bigchaindb import exceptions as core_exceptions from bigchaindb.common import crypto, exceptions from bigchaindb.common.utils import gen_timestamp, serialize -from bigchaindb.common.transaction import TransactionLink import bigchaindb -from bigchaindb import backend, config_utils, utils +from bigchaindb import backend, config_utils, fastquery from bigchaindb.consensus import BaseConsensusRules from bigchaindb.models import Block, Transaction @@ -390,51 +389,6 @@ class Bigchain(object): # Either no transaction was returned spending the `(txid, output)` as # input or the returned transactions are not valid. - def get_outputs(self, owner): - """Retrieve a list of links to transaction outputs for a given public - key. - - Args: - owner (str): base58 encoded public key. - - Returns: - :obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s - pointing to another transaction's condition - """ - # get all transactions in which owner is in the `owners_after` list - response = backend.query.get_owned_ids(self.connection, owner) - return [ - TransactionLink(tx['id'], index) - for tx in response - if not self.is_tx_strictly_in_invalid_block(tx['id']) - for index, output in enumerate(tx['outputs']) - if utils.output_has_owner(output, owner) - ] - - def is_tx_strictly_in_invalid_block(self, txid): - """ - Checks whether the transaction with the given ``txid`` - *strictly* belongs to an invalid block. - - Args: - txid (str): Transaction id. - - Returns: - bool: ``True`` if the transaction *strictly* belongs to a - block that is invalid. ``False`` otherwise. - - Note: - Since a transaction may be in multiple blocks, with - different statuses, the term "strictly" is used to - emphasize that if a transaction is said to be in an invalid - block, it means that it is not in any other block that is - either valid or undecided. - - """ - validity = self.get_blocks_status_containing_tx(txid) - return (Bigchain.BLOCK_VALID not in validity.values() and - Bigchain.BLOCK_UNDECIDED not in validity.values()) - def get_owned_ids(self, owner): """Retrieve a list of ``txid`` s that can be used as inputs. 
@@ -447,14 +401,17 @@ class Bigchain(object): """ return self.get_outputs_filtered(owner, include_spent=False) + @property + def fastquery(self): + return fastquery.FastQuery(self.connection, self.me) + def get_outputs_filtered(self, owner, include_spent=True): """ Get a list of output links filtered on some criteria """ - outputs = self.get_outputs(owner) + outputs = self.fastquery.get_outputs_by_public_key(owner) if not include_spent: - outputs = [o for o in outputs - if not self.get_spent(o.txid, o.output)] + outputs = self.fastquery.filter_spent_outputs(outputs) return outputs def get_transactions_filtered(self, asset_id, operation=None): diff --git a/bigchaindb/fastquery.py b/bigchaindb/fastquery.py new file mode 100644 index 00000000..d19294ce --- /dev/null +++ b/bigchaindb/fastquery.py @@ -0,0 +1,70 @@ +from bigchaindb.utils import output_has_owner +from bigchaindb.backend import query +from bigchaindb.common.transaction import TransactionLink + + +class FastQuery: + """ + Database queries that join on block results from a single node. + + * Votes are not validated for security (security is a replication concern) + * Votes come from only one node, and as such, non-byzantine fault tolerance + is reduced. + + Previously, to consider the status of a block, all votes for that block + were retrieved and the election results were counted. This meant that a + faulty node may still have been able to obtain a correct election result. + However, from the point of view of a client, it is still neccesary to + query multiple nodes to insure against getting an incorrect response from + a byzantine node. + """ + def __init__(self, connection, me): + self.connection = connection + self.me = me + + def filter_valid_block_ids(self, block_ids, include_undecided=False): + """ + Given block ids, return only the ones that are valid. + """ + block_ids = list(set(block_ids)) + votes = query.get_votes_for_blocks_by_voter( + self.connection, block_ids, self.me) + votes = {vote['vote']['voting_for_block']: vote['vote']['is_block_valid'] + for vote in votes} + return [block_id for block_id in block_ids + if votes.get(block_id, include_undecided)] + + def filter_valid_items(self, items, block_id_key=lambda b: b[0]): + """ + Given items with block ids, return only the ones that are valid or undecided. 
+ """ + items = list(items) + block_ids = map(block_id_key, items) + valid_block_ids = set(self.filter_valid_block_ids(block_ids, True)) + return [b for b in items if block_id_key(b) in valid_block_ids] + + def get_outputs_by_public_key(self, public_key): + """ + Get outputs for a public key + """ + res = list(query.get_owned_ids(self.connection, public_key)) + txs = [tx for _, tx in self.filter_valid_items(res)] + return [TransactionLink(tx['id'], index) + for tx in txs + for index, output in enumerate(tx['outputs']) + if output_has_owner(output, public_key)] + + def filter_spent_outputs(self, outputs): + """ + Remove outputs that have been spent + + Args: + outputs: list of TransactionLink + """ + links = [o.to_dict() for o in outputs] + res = query.get_spending_transactions(self.connection, links) + txs = [tx for _, tx in self.filter_valid_items(res)] + spends = {TransactionLink.from_dict(input_['fulfills']) + for tx in txs + for input_ in tx['inputs']} + return [ff for ff in outputs if ff not in spends] diff --git a/bigchaindb/web/server.py b/bigchaindb/web/server.py index 46495368..3c33a33a 100644 --- a/bigchaindb/web/server.py +++ b/bigchaindb/web/server.py @@ -7,6 +7,7 @@ import copy import multiprocessing from flask import Flask +from flask_cors import CORS import gunicorn.app.base from bigchaindb import utils @@ -60,6 +61,21 @@ def create_app(*, debug=False, threads=4): app = Flask(__name__) + CORS(app, + allow_headers=( + 'x-requested-with', + 'content-type', + 'accept', + 'origin', + 'authorization', + 'x-csrftoken', + 'withcredentials', + 'cache-control', + 'cookie', + 'session-id', + ), + supports_credentials=True) + app.debug = debug app.config['bigchain_pool'] = utils.pool(Bigchain, size=threads) diff --git a/bigchaindb/web/views/base.py b/bigchaindb/web/views/base.py index 0c226d7d..e4ae980b 100644 --- a/bigchaindb/web/views/base.py +++ b/bigchaindb/web/views/base.py @@ -28,4 +28,7 @@ def base_url(): def base_ws_uri(): """Base websocket uri.""" - return 'ws://{host}:{port}'.format(**config['wsserver']) + # TODO Revisit as this is a workaround to address issue + # https://github.com/bigchaindb/bigchaindb/issues/1465. + host = request.environ['HTTP_HOST'].split(':')[0] + return 'ws://{}:{}'.format(host, config['wsserver']['port']) diff --git a/bigchaindb/web/views/blocks.py b/bigchaindb/web/views/blocks.py index 1ea1a28f..2471739d 100644 --- a/bigchaindb/web/views/blocks.py +++ b/bigchaindb/web/views/blocks.py @@ -42,7 +42,7 @@ class BlockListApi(Resource): """ parser = reqparse.RequestParser() parser.add_argument('tx_id', type=str, required=True) - parser.add_argument('status', type=str, + parser.add_argument('status', type=str, case_sensitive=False, choices=[Bigchain.BLOCK_VALID, Bigchain.BLOCK_INVALID, Bigchain.BLOCK_UNDECIDED]) args = parser.parse_args(strict=True) diff --git a/docs/server/source/cloud-deployment-templates/azure-quickstart-template.md b/docs/server/source/appendices/azure-quickstart-template.md similarity index 93% rename from docs/server/source/cloud-deployment-templates/azure-quickstart-template.md rename to docs/server/source/appendices/azure-quickstart-template.md index 1bf35a31..59f52fd3 100644 --- a/docs/server/source/cloud-deployment-templates/azure-quickstart-template.md +++ b/docs/server/source/appendices/azure-quickstart-template.md @@ -1,6 +1,6 @@ # Azure Quickstart Template -If you didn't read the introduction to the [cloud deployment templates](index.html), please do that now. 
The main point is that they're not for deploying a production node; they can be used as a starting point. +This page outlines how to run a single BigchainDB node on the Microsoft Azure public cloud, with RethinkDB as the database backend. It uses an Azure Quickstart Template. That template is dated because we now recommend using MongoDB instead of RethinkDB. That's why we moved this page to the Appendices. Note: There was an Azure quickstart template in the `blockchain` directory of Microsoft's `Azure/azure-quickstart-templates` repository on GitHub. It's gone now; it was replaced by the one described here. diff --git a/docs/server/source/appendices/index.rst b/docs/server/source/appendices/index.rst index 1c969c05..f5931e64 100755 --- a/docs/server/source/appendices/index.rst +++ b/docs/server/source/appendices/index.rst @@ -18,6 +18,9 @@ Appendices backend commands aws-setup + template-terraform-aws + template-ansible + azure-quickstart-template generate-key-pair-for-ssh firewall-notes ntp-notes diff --git a/docs/server/source/cloud-deployment-templates/template-ansible.md b/docs/server/source/appendices/template-ansible.md similarity index 94% rename from docs/server/source/cloud-deployment-templates/template-ansible.md rename to docs/server/source/appendices/template-ansible.md index f296a2cf..508d0555 100644 --- a/docs/server/source/cloud-deployment-templates/template-ansible.md +++ b/docs/server/source/appendices/template-ansible.md @@ -1,9 +1,9 @@ # Template: Ansible Playbook to Run a BigchainDB Node on an Ubuntu Machine -If you didn't read the introduction to the [cloud deployment templates](index.html), please do that now. The main point is that they're not for deploying a production node; they can be used as a starting point. - This page explains how to use [Ansible](https://www.ansible.com/) to install, configure and run all the software needed to run a one-machine BigchainDB node on a server running Ubuntu 16.04. +**Note: We're not actively maintaining the associated Ansible files (e.g. playbooks). They are RethinkDB-specific, even though we now recommend using MongoDB. You may find the old Ansible stuff useful nevertheless, which is why we moved this page to the Appendices rather than deleting it.** + ## Install Ansible diff --git a/docs/server/source/cloud-deployment-templates/template-terraform-aws.md b/docs/server/source/appendices/template-terraform-aws.md similarity index 95% rename from docs/server/source/cloud-deployment-templates/template-terraform-aws.md rename to docs/server/source/appendices/template-terraform-aws.md index d4a22e83..055a5ee3 100644 --- a/docs/server/source/cloud-deployment-templates/template-terraform-aws.md +++ b/docs/server/source/appendices/template-terraform-aws.md @@ -1,8 +1,8 @@ # Template: Using Terraform to Provision an Ubuntu Machine on AWS -If you didn't read the introduction to the [cloud deployment templates](index.html), please do that now. The main point is that they're not for deploying a production node; they can be used as a starting point. +This page explains a way to use [Terraform](https://www.terraform.io/) to provision an Ubuntu machine (i.e. an EC2 instance with Ubuntu 16.04) and other resources on [AWS](https://aws.amazon.com/). That machine can then be used to host a one-machine BigchainDB node, for example. -This page explains a way to use [Terraform](https://www.terraform.io/) to provision an Ubuntu machine (i.e. an EC2 instance with Ubuntu 16.04) and other resources on [AWS](https://aws.amazon.com/). 
That machine can then be used to host a one-machine BigchainDB node. +**Note: We're not actively maintaining the associated Terraform files. You may find them useful nevertheless, which is why we moved this page to the Appendices rather than deleting it.** ## Install Terraform diff --git a/docs/server/source/cloud-deployment-templates/ca-installation.rst b/docs/server/source/cloud-deployment-templates/ca-installation.rst new file mode 100644 index 00000000..9ea38477 --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/ca-installation.rst @@ -0,0 +1,89 @@ +How to Set Up a Self-Signed Certificate Authority +================================================= + +This page enumerates the steps *we* use to set up a self-signed certificate authority (CA). +This is something that only needs to be done once per cluster, +by the organization managing the cluster, i.e. the CA is for the whole cluster. +We use Easy-RSA. + + +Step 1: Install & Configure Easy-RSA +------------------------------------ + +First create a directory for the CA and cd into it: + +.. code:: bash + + mkdir bdb-cluster-ca + + cd bdb-cluster-ca + +Then :ref:`install and configure Easy-RSA in that directory `. + + +Step 2: Create a Self-Signed CA +------------------------------- + +You can create a self-signed CA +by going to the ``bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3`` directory and using: + +.. code:: bash + + ./easyrsa init-pki + + ./easyrsa build-ca + + +You will be asked to enter a PEM pass phrase for encrypting the ``ca.key`` file. +Make sure to securely store that PEM pass phrase. +If you lose it, you won't be able to add or remove entities from your PKI infrastructure in the future. + +It will ask several other questions. +You can accept all the defaults [in brackets] by pressing Enter. +While ``Easy-RSA CA`` *is* a valid and acceptable Common Name, +you should probably enter a name based on the name of the managing organization, +e.g. ``Omega Ledger CA``. + +Tip: You can get help with the ``easyrsa`` command (and its subcommands) +by using the subcommand ``./easyrsa help`` + + +Step 3: Create an Intermediate CA +--------------------------------- + +TODO(Krish) + +Step 4: Generate a Certificate Revocation List +---------------------------------------------- + +You can generate a Certificate Revocation List (CRL) using: + +.. code:: bash + + ./easyrsa gen-crl + +You will need to run this command every time you revoke a certificate and the +generated ``crl.pem`` needs to be uploaded to your infrastructure to prevent +the revoked certificate from being used again. + + +Step 5: Secure the CA +--------------------- + +The security of your infrastructure depends on the security of this CA. + +- Ensure that you restrict access to the CA and enable only legitimate and + required people to sign certificates and generate CRLs. + +- Restrict access to the machine where the CA is hosted. + +- Many certificate providers keep the CA offline and use a rotating + intermediate CA to sign and revoke certificates, to mitigate the risk of the + CA getting compromised. + +- In case you want to destroy the machine where you created the CA + (for example, if this was set up on a cloud provider instance), + you can backup the entire ``easyrsa`` directory + to secure storage. You can always restore it to a trusted instance again + during the times when you want to sign or revoke certificates. + Remember to backup the directory after every update. 
diff --git a/docs/server/source/cloud-deployment-templates/client-tls-certificate.rst b/docs/server/source/cloud-deployment-templates/client-tls-certificate.rst new file mode 100644 index 00000000..60a754a0 --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/client-tls-certificate.rst @@ -0,0 +1,77 @@ +How to Generate a Client Certificate for MongoDB +================================================ + +This page enumerates the steps *we* use +to generate a client certificate +to be used by clients who want to connect to a TLS-secured MongoDB cluster. +We use Easy-RSA. + + +Step 1: Install and Configure Easy-RSA +-------------------------------------- + +First create a directory for the client certificate and cd into it: + +.. code:: bash + + mkdir client-cert + + cd client-cert + +Then :ref:`install and configure Easy-RSA in that directory `. + + +Step 2: Create the Client Private Key and CSR +--------------------------------------------- + +You can create the client private key and certificate signing request (CSR) +by going into the directory ``client-cert/easy-rsa-3.0.1/easyrsa`` +and using: + +.. code:: bash + + ./easyrsa init-pki + + ./easyrsa gen-req bdb-instance-0 nopass + + +You should change ``bdb-instance-0`` to a value based on the client +the certificate is for. + +Tip: You can get help with the ``easyrsa`` command (and its subcommands) +by using the subcommand ``./easyrsa help`` + + +Step 3: Get the Client Certificate Signed +----------------------------------------- + +The CSR file (created in the last step) +should be located in ``pki/reqs/bdb-instance-0.req``. +You need to send it to the organization managing the cluster +so that they can use their CA +to sign the request. +(The managing organization should already have a self-signed CA.) + +If you are the admin of the managing organization's self-signed CA, +then you can import the CSR and use Easy-RSA to sign it. For example: + +.. code:: bash + + ./easyrsa import-req bdb-instance-0.req bdb-instance-0 + + ./easyrsa sign-req client bdb-instance-0 + +Once you have signed it, you can send the signed certificate +and the CA certificate back to the requestor. +The files are ``pki/issued/bdb-instance-0.crt`` and ``pki/ca.crt``. + + +Step 4: Generate the Consolidated Client PEM File +------------------------------------------------- + +MongoDB requires a single, consolidated file containing both the public and +private keys. + +.. code:: bash + + cat bdb-instance-0.crt bdb-instance-0.key > bdb-instance-0.pem diff --git a/docs/server/source/cloud-deployment-templates/easy-rsa.rst b/docs/server/source/cloud-deployment-templates/easy-rsa.rst new file mode 100644 index 00000000..50a62cd5 --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/easy-rsa.rst @@ -0,0 +1,84 @@ +How to Install & Configure Easy-RSA +=================================== + +We use +`Easy-RSA version 3 +`_, a +wrapper over complex ``openssl`` commands. +`Easy-RSA is available on GitHub `_ and licensed under GPLv2. + + +Step 1: Install Easy-RSA Dependencies +------------------------------------- + +The only dependency for Easy-RSA v3 is ``openssl``, +which is available from the ``openssl`` package on Ubuntu and other +Debian-based operating systems, i.e. you can install it using: + +.. code:: bash + + sudo apt-get update + + sudo apt-get install openssl + + +Step 2: Install Easy-RSA +------------------------ + +Make sure you're in the directory where you want Easy-RSA to live, +then download it and extract it within that directory: + +.. 
code:: bash + + wget https://github.com/OpenVPN/easy-rsa/archive/3.0.1.tar.gz + + tar xzvf 3.0.1.tar.gz + + rm 3.0.1.tar.gz + +There should now be a directory named ``easy-rsa-3.0.1`` +in your current directory. + + +Step 3: Customize the Easy-RSA Configuration +-------------------------------------------- + +We now create a config file named ``vars`` +by copying the existing ``vars.example`` file +and then editing it. +You should change the +country, province, city, org and email +to the correct values for you. +(Note: The country, province, city, org and email are part of +the `Distinguished Name `_ (DN).) +The comments in the file explain what the variables mean. + +.. code:: bash + + cd easy-rsa-3.0.1/easyrsa3 + + cp vars.example vars + + echo 'set_var EASYRSA_DN "org"' >> vars + echo 'set_var EASYRSA_REQ_OU "IT"' >> vars + echo 'set_var EASYRSA_KEY_SIZE 4096' >> vars + + echo 'set_var EASYRSA_REQ_COUNTRY "DE"' >> vars + echo 'set_var EASYRSA_REQ_PROVINCE "Berlin"' >> vars + echo 'set_var EASYRSA_REQ_CITY "Berlin"' >> vars + echo 'set_var EASYRSA_REQ_ORG "BigchainDB GmbH"' >> vars + echo 'set_var EASYRSA_REQ_EMAIL "dev@bigchaindb.com"' >> vars + + +Step 4: Maybe Edit x509-types/server +------------------------------------ + +.. warning:: + + Only do this step if you are setting up a self-signed CA + or creating a server/member certificate. + +Edit the file ``x509-types/server`` and change +``extendedKeyUsage = serverAuth`` to +``extendedKeyUsage = serverAuth,clientAuth``. +See `the MongoDB documentation about x.509 authentication `_ to understand why. diff --git a/docs/server/source/cloud-deployment-templates/first-node.rst b/docs/server/source/cloud-deployment-templates/first-node.rst index 9130696a..298d7ceb 100644 --- a/docs/server/source/cloud-deployment-templates/first-node.rst +++ b/docs/server/source/cloud-deployment-templates/first-node.rst @@ -4,6 +4,7 @@ First Node or Bootstrap Node Setup This document is a work in progress and will evolve over time to include security, websocket and other settings. + Step 1: Set Up the Cluster -------------------------- @@ -421,8 +422,12 @@ Step 17. Verify that the Cluster is Correctly Set Up nslookup bdb-instance-0 dig +noall +answer _bdb-port._tcp.bdb-instance-0.default.svc.cluster.local SRV + + dig +noall +answer _bdb-ws-port._tcp.bdb-instance-0.default.svc.cluster.local SRV curl -X GET http://bdb-instance-0:9984 + + wsc ws://bdb-instance-0:9985/api/v1/streams/valid_tx * Verify NGINX instance @@ -435,12 +440,16 @@ Step 17. Verify that the Cluster is Correctly Set Up curl -X GET http://ngx-instance-0:27017 # results in curl: (56) Recv failure: Connection reset by peer dig +noall +answer _ngx-public-bdb-port._tcp.ngx-instance-0.default.svc.cluster.local SRV + + dig +noall +answer _ngx-public-ws-port._tcp.ngx-instance-0.default.svc.cluster.local SRV * If you have run the vanilla NGINX instance, run .. code:: bash curl -X GET http://ngx-instance-0:80 + + wsc ws://ngx-instance-0:81/api/v1/streams/valid_tx * If you have the OpenResty NGINX + 3scale instance, run @@ -448,7 +457,7 @@ Step 17. Verify that the Cluster is Correctly Set Up curl -X GET https://ngx-instance-0 - * Check the MongoDB monitoring and backup agent on the MOngoDB Coud Manager portal to verify they are working fine. + * Check the MongoDB monitoring and backup agent on the MongoDB Coud Manager portal to verify they are working fine. * Send some transactions to BigchainDB and verify it's up and running! 
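The verification steps above exercise the HTTP API on port 9984 and the new WebSocket endpoint on port 9985 with `curl` and `wsc`. The same checks can be scripted; the sketch below is illustrative only and assumes the third-party `requests` and `websockets` Python packages, plus a machine that can resolve the in-cluster hostname `bdb-instance-0` used in those steps.

```python
# Illustrative sketch only -- not part of the patch.
# Assumes `pip install requests websockets` and in-cluster DNS resolution.
import asyncio

import requests
import websockets


def check_http_api():
    # Equivalent to: curl -X GET http://bdb-instance-0:9984
    response = requests.get('http://bdb-instance-0:9984')
    response.raise_for_status()
    print(response.json())


async def check_ws_api():
    # Equivalent to: wsc ws://bdb-instance-0:9985/api/v1/streams/valid_tx
    uri = 'ws://bdb-instance-0:9985/api/v1/streams/valid_tx'
    async with websockets.connect(uri):
        print('WebSocket endpoint is reachable')


if __name__ == '__main__':
    check_http_api()
    asyncio.get_event_loop().run_until_complete(check_ws_api())
```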
diff --git a/docs/server/source/cloud-deployment-templates/index.rst b/docs/server/source/cloud-deployment-templates/index.rst index 41eec0ed..a94fab94 100644 --- a/docs/server/source/cloud-deployment-templates/index.rst +++ b/docs/server/source/cloud-deployment-templates/index.rst @@ -1,21 +1,27 @@ -Cloud Deployment Templates -========================== +Production Deployment Template +============================== -We have some "templates" to deploy a basic, working, but bare-bones BigchainDB node on various cloud providers. They should *not* be used as-is to deploy a node for production. They can be used as a starting point. +This section outlines how *we* deploy production BigchainDB nodes and clusters +on Microsoft Azure +using Kubernetes. +We improve it constantly. +You may choose to use it as a template or reference for your own deployment, +but *we make no claim that it is suitable for your purposes*. +Feel free change things to suit your needs or preferences. -You don't have to use the tools we use in the templates. You can use whatever tools you prefer. - -If you find the cloud deployment templates for nodes helpful, then you may also be interested in our scripts for :doc:`deploying a testing cluster on AWS <../clusters-feds/aws-testing-cluster>` (documented in the Clusters section). .. toctree:: :maxdepth: 1 - template-terraform-aws - template-ansible - azure-quickstart-template + workflow + ca-installation + server-tls-certificate + client-tls-certificate + revoke-tls-certificate template-kubernetes-azure node-on-kubernetes add-node-on-kubernetes upgrade-on-kubernetes first-node log-analytics + easy-rsa diff --git a/docs/server/source/cloud-deployment-templates/log-analytics.rst b/docs/server/source/cloud-deployment-templates/log-analytics.rst index fbef70d2..1f5d5596 100644 --- a/docs/server/source/cloud-deployment-templates/log-analytics.rst +++ b/docs/server/source/cloud-deployment-templates/log-analytics.rst @@ -3,11 +3,10 @@ Log Analytics on Azure This section documents how to create and configure a Log Analytics workspace on Azure, for a Kubernetes-based deployment. - The documented approach is based on an integration of Microsoft's Operations Management Suite (OMS) with a Kubernetes-based Azure Container Service cluster. -The :ref:`oms-k8s-references` contains links to more detailed documentation on +The :ref:`oms-k8s-references` section (below) contains links to more detailed documentation on Azure, and Kubernetes. There are three main steps involved: @@ -23,9 +22,9 @@ one template so we'll cover them together. Step 3 relies on a Minimum Requirements -------------------- This document assumes that you have already deployed a Kubernetes cluster, and -that you have the Kubernetes command line ``kubectl`` installed. +that you have the Kubernetes command line interface ``kubectl`` installed. -Creating a workspace and adding a containers solution +Creating a Workspace and Adding a Containers Solution ----------------------------------------------------- For the sake of this document and example, we'll assume an existing resource group named: @@ -46,7 +45,7 @@ If you feel creative you may replace these names by more interesting ones. --template-file log_analytics_oms.json \ --parameters @log_analytics_oms.parameters.json -An example of a simple tenplate file (``--template-file``): +An example of a simple template file (``--template-file``): .. 
code-block:: json @@ -120,14 +119,14 @@ An example of the associated parameter file (``--parameters``): } } -Deploying the OMS agent(s) --------------------------- -In order to deploy an OMS agent two important pieces of information are needed: +Deploy the OMS Agents +--------------------- +To deploy an OMS agent, two important pieces of information are needed: * workspace id * workspace key -Obtaining the workspace id: +You can obtain the workspace id using: .. code-block:: bash @@ -138,13 +137,17 @@ Obtaining the workspace id: | grep customerId "customerId": "12345678-1234-1234-1234-123456789012", -Obtaining the workspace key: +Until we figure out a way to obtain the *workspace key* via the command line, +you can get it via the OMS Portal. +To get to the OMS Portal, go to the Azure Portal and click on: -Until we figure out a way to this via the command line please see instructions -under `Obtain your workspace ID and key -`_. +Resource Groups > (Your k8s cluster's resource group) > Log analytics (OMS) > (Name of the only item listed) > OMS Workspace > OMS Portal -Once you have the workspace id and key you can include them in the following +(Let us know if you find a faster way.) +Then see `Microsoft's instructions to obtain your workspace ID and key +`_ (via the OMS Portal). + +Once you have the workspace id and key, you can include them in the following YAML file (:download:`oms-daemonset.yaml <../../../../k8s/logging-and-monitoring/oms-daemonset.yaml>`): @@ -182,14 +185,44 @@ YAML file (:download:`oms-daemonset.yaml hostPath: path: /var/run/docker.sock -To deploy the agent simply run the following command: +To deploy the OMS agents (one per Kubernetes node, i.e. one per computer), +simply run the following command: .. code-block:: bash $ kubectl create -f oms-daemonset.yaml -Some useful management tasks +Create an Email Alert +--------------------- + +Suppose you want to get an email whenever there's a logging message +with the CRITICAL or ERROR logging level from any container. +At the time of writing, it wasn't possible to create email alerts +using the Azure Portal (as far as we could tell), +but it *was* possible using the OMS Portal. +(There are instructions to get to the OMS Portal +in the section titled :ref:`Deploy the OMS Agents` above.) +Once you're in the OMS Portal, click on **Log Search** +and enter the query string: + +``Type=ContainerLog (critical OR error)`` + +If you don't see any query results, +try experimenting with the query string and time range +to convince yourself that it's working. +For query syntax help, see the +`Log Analytics search reference `_. +If you want to exclude the "404 Not Found" errors, +use the query string +"Type=ContainerLog (critical OR error) NOT(404)". +Once you're satisfied with the query string, +click the **🔔 Alert** icon in the top menu, +fill in the form, +and click **Save** when you're done. + + +Some Useful Management Tasks ---------------------------- List workspaces: @@ -207,7 +240,7 @@ List solutions: --resource-group resource_group \ --resource-type Microsoft.OperationsManagement/solutions -Deleting the containers solution: +Delete the containers solution: .. code-block:: bash @@ -222,7 +255,7 @@ Deleting the containers solution: --resource-type Microsoft.OperationsManagement/solutions \ --name "Containers(work_space)" -Deleting the workspace: +Delete the workspace: .. 
code-block:: bash diff --git a/docs/server/source/cloud-deployment-templates/revoke-tls-certificate.rst b/docs/server/source/cloud-deployment-templates/revoke-tls-certificate.rst new file mode 100644 index 00000000..5c566c97 --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/revoke-tls-certificate.rst @@ -0,0 +1,42 @@ +How to Revoke an SSL/TLS Certificate +==================================== + +This page enumerates the steps *we* take to revoke a self-signed SSL/TLS certificate +in a cluster. +It can only be done by someone with access to the self-signed CA +associated with the cluster's managing organization. + +Step 1: Revoke a Certificate +---------------------------- + +Since we used Easy-RSA version 3 to +:ref:`set up the CA `, +we use it to revoke certificates too. + +Go to the following directory (associated with the self-signed CA): +``.../bdb-cluster-ca/easy-rsa-3.0.1/easyrsa3``. +You need to be aware of the file name used to import the certificate using the +``./easyrsa import-req`` before. Run the following command to revoke a +certificate: + +.. code:: bash + + ./easyrsa revoke + +This will update the CA database with the revocation details. +The next step is to use the updated database to issue an up-to-date +certificate revocation list (CRL). + + +Step 2: Generate a New CRL +-------------------------- + +Generate a new CRL for your infrastructure using: + +.. code:: bash + + ./easyrsa gen-crl + +The generated ``crl.pem`` file needs to be uploaded to your infrastructure to +prevent the revoked certificate from being used again. + diff --git a/docs/server/source/cloud-deployment-templates/server-tls-certificate.rst b/docs/server/source/cloud-deployment-templates/server-tls-certificate.rst new file mode 100644 index 00000000..b9cb1a14 --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/server-tls-certificate.rst @@ -0,0 +1,92 @@ +How to Generate a Server Certificate for MongoDB +================================================ + +This page enumerates the steps *we* use to generate a +server certificate for a MongoDB instance. +A server certificate is also referred to as a "member certificate" +in the MongoDB documentation. +We use Easy-RSA. + + +Step 1: Install & Configure Easy–RSA +------------------------------------ + +First create a directory for the server certificate (member cert) and cd into it: + +.. code:: bash + + mkdir member-cert + + cd member-cert + +Then :ref:`install and configure Easy-RSA in that directory `. + + +Step 2: Create the Server Private Key and CSR +--------------------------------------------- + +You can create the server private key and certificate signing request (CSR) +by going into the directory ``member-cert/easy-rsa-3.0.1/easyrsa`` +and using something like: + +.. code:: bash + + ./easyrsa init-pki + + ./easyrsa --req-cn=mdb-instance-0 --subject-alt-name=DNS:localhost,DNS:mdb-instance-0 gen-req mdb-instance-0 nopass + +You must replace the common name (``mdb-instance-0`` above) +with the common name of *your* MongoDB instance +(which should be the same as the hostname of your MongoDB instance). + +You need to provide the ``DNS:localhost`` SAN during certificate generation for +using the ``localhost exception`` in the MongoDB instance. + +All certificates can have this attribute without compromising security as the +``localhost exception`` works only the first time. 
+ +Tip: You can get help with the ``easyrsa`` command (and its subcommands) +by using the subcommand ``./easyrsa help`` + + +Step 3: Get the Server Certificate Signed +----------------------------------------- + +The CSR file (created in the last step) +should be located in ``pki/reqs/mdb-instance-0.req``. +You need to send it to the organization managing the cluster +so that they can use their CA +to sign the request. +(The managing organization should already have a self-signed CA.) + +If you are the admin of the managing organization's self-signed CA, +then you can import the CSR and use Easy-RSA to sign it. For example: + +.. code:: bash + + ./easyrsa import-req mdb-instance-0.req mdb-instance-0 + + ./easyrsa --subject-alt-name=DNS:localhost,DNS:mdb-instance-0 sign-req server mdb-instance-0 + +Once you have signed it, you can send the signed certificate +and the CA certificate back to the requestor. +The files are ``pki/issued/mdb-instance-0.crt`` and ``pki/ca.crt``. + + +Step 4: Generate the Consolidated Server PEM File +------------------------------------------------- + +MongoDB requires a single, consolidated file containing both the public and +private keys. + +.. code:: bash + + cat mdb-instance-0.crt mdb-instance-0.key > mdb-instance-0.pem + + +Step 5: Update the MongoDB Config File +-------------------------------------- + +In the MongoDB configuration file, +set the ``net.ssl.PEMKeyFile`` parameter to the path of the ``mdb-instance-0.pem`` file, +and the ``net.ssl.CAFile`` parameter to the ``ca.crt`` file. diff --git a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst index b967e764..a9e6792c 100644 --- a/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst +++ b/docs/server/source/cloud-deployment-templates/template-kubernetes-azure.rst @@ -86,6 +86,7 @@ confuse some software. $ az group create --name --location + Example location names are ``koreacentral`` and ``westeurope``. Finally, you can deploy an ACS using something like: @@ -95,12 +96,14 @@ Finally, you can deploy an ACS using something like: $ az acs create --name \ --resource-group \ --master-count 3 \ - --agent-count 3 \ + --agent-count 2 \ --admin-username ubuntu \ --agent-vm-size Standard_D2_v2 \ --dns-prefix \ --ssh-key-value ~/.ssh/.pub \ --orchestrator-type kubernetes + --debug --output json + There are more options. For help understanding all the options, use the built-in help: diff --git a/docs/server/source/cloud-deployment-templates/workflow.rst b/docs/server/source/cloud-deployment-templates/workflow.rst new file mode 100644 index 00000000..b8aa919f --- /dev/null +++ b/docs/server/source/cloud-deployment-templates/workflow.rst @@ -0,0 +1,123 @@ +Overview +======== + +This page summarizes the steps *we* go through +to set up a production BigchainDB cluster. +We are constantly improving them. +You can modify them to suit your needs. + + +Things the Managing Organization Must Do First +---------------------------------------------- + + +1. Set Up a Self-Signed Certificate Authority +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We use SSL/TLS and self-signed certificates +for MongoDB authentication (and message encryption). +The certificates are signed by the organization managing the cluster. +If your organization already has a process +for signing certificates +(i.e. an internal self-signed certificate authority [CA]), +then you can skip this step. 
+Otherwise, your organization must +:ref:`set up its own self-signed certificate authority `. + + +2. Register a Domain and Get an SSL Certificate for It +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The BigchainDB APIs (HTTP API and WebSocket API) should be served using TLS, +so the organization running the cluster +should choose an FQDN for their API (e.g. api.organization-x.com), +register the domain name, +and buy an SSL/TLS certificate for the FQDN. + + +Things Each Node Operator Must Do +--------------------------------- + +☐ Every MongoDB instance in the cluster must have a unique (one-of-a-kind) name. +Ask the organization managing your cluster if they have a standard +way of naming instances in the cluster. +For example, maybe they assign a unique number to each node, +so that if you're operating node 12, your MongoDB instance would be named +``mdb-instance-12``. +Similarly, other instances must also have unique names in the cluster. + +#. Name of the MongoDB instance (``mdb-instance-*``) +#. Name of the BigchainDB instance (``bdb-instance-*``) +#. Name of the NGINX instance (``ngx-instance-*``) +#. Name of the MongoDB monitoring agent instance (``mdb-mon-instance-*``) +#. Name of the MongoDB backup agent instance (``mdb-bak-instance-*``) + + +☐ Every node in a BigchainDB cluster needs its own +BigchainDB keypair (i.e. a public key and corresponding private key). +You can generate a BigchainDB keypair for your node, for example, +using the `BigchainDB Python Driver `_. + +.. code:: python + + from bigchaindb_driver.crypto import generate_keypair + print(generate_keypair()) + + +☐ Share your BigchaindB *public* key with all the other nodes +in the BigchainDB cluster. +Don't share your private key. + + +☐ Get the BigchainDB public keys of all the other nodes in the cluster. +That list of public keys is known as the BigchainDB "keyring." + + +☐ Ask the managing organization +for the FQDN used to serve the BigchainDB APIs +(e.g. ``api.orgname.net`` or ``bdb.clustername.com``). + + +☐ Make up an FQDN for your BigchainDB node (e.g. ``mynode.mycorp.com``). +Make sure you've registered the associated domain name (e.g. ``mycorp.com``), +and have an SSL certificate for the FQDN. +(You can get an SSL certificate from any SSL certificate provider). + + +☐ If the cluster uses 3scale for API authentication, monitoring and billing, +you must ask the managing organization for all relevant 3scale credentials. + + +☐ If the cluster uses MongoDB Cloud Manager for monitoring and backup, +you must ask the managing organization for the ``Agent Api Key``. +(Each Cloud Manager backup will have its own ``Agent Api Key``. +If there's one Cloud Manager backup, +there will be one ``Agent Api Key`` for the whole cluster.) + + +☐ Generate four keys and corresponding certificate signing requests (CSRs): + +#. Server Certificate (a.k.a. Member Certificate) for the MongoDB instance +#. Client Certificate for BigchainDB Server to identify itself to MongoDB +#. Client Certificate for MongoDB Monitoring Agent to identify itself to MongoDB +#. Client Certificate for MongoDB Backup Agent to identify itself to MongoDB + +Ask the managing organization to use its self-signed CA to sign those certificates. + +For help, see the pages: + +* :ref:`How to Generate a Server Certificate for MongoDB` +* :ref:`How to Generate a Client Certificate for MongoDB` + + +☐ :doc:`Deploy a Kubernetes cluster on Azure `. + + +☐ Create the Kubernetes Configuration for this node. 
+We will use Kubernetes ConfigMaps and Secrets to hold all the information +gathered above. + + +☐ Deploy your BigchainDB node on your Kubernetes cluster. + +TODO: Links to instructions for first-node-in-cluster or second-or-later-node-in-cluster \ No newline at end of file diff --git a/docs/server/source/drivers-clients/index.rst b/docs/server/source/drivers-clients/index.rst index 0bfde7ad..ef749d55 100644 --- a/docs/server/source/drivers-clients/index.rst +++ b/docs/server/source/drivers-clients/index.rst @@ -20,7 +20,7 @@ Community-Driven Libraries and Tools Some of these projects are a work in progress, but may still be useful. -* `Javascript transaction builder `_ +* `JavaScript / Node.js driver `_ * `Haskell transaction builder `_ * `Go driver `_ * `Java driver `_ diff --git a/docs/server/source/index.rst b/docs/server/source/index.rst index bf45aca0..0dee9174 100644 --- a/docs/server/source/index.rst +++ b/docs/server/source/index.rst @@ -7,14 +7,14 @@ BigchainDB Server Documentation ← Back to All BigchainDB Docs introduction quickstart - cloud-deployment-templates/index production-nodes/index + clusters-feds/index + cloud-deployment-templates/index dev-and-test/index server-reference/index http-client-server-api websocket-event-stream-api drivers-clients/index - clusters-feds/index data-models/index schema/transaction schema/vote diff --git a/docs/server/source/quickstart.md b/docs/server/source/quickstart.md index 2eae07f0..5c2b0500 100644 --- a/docs/server/source/quickstart.md +++ b/docs/server/source/quickstart.md @@ -8,7 +8,7 @@ A. Install MongoDB as the database backend. (There are other options but you can B. Run MongoDB. Open a Terminal and run the command: ```text -$ mongod --replSet=bigchain-rs +$ sudo mongod --replSet=bigchain-rs ``` C. Ubuntu 16.04 already has Python 3.5, so you don't need to install it, but you do need to install some other things: diff --git a/k8s/bigchaindb/bigchaindb-dep.yaml b/k8s/bigchaindb/bigchaindb-dep.yaml index b8550249..140ef50e 100644 --- a/k8s/bigchaindb/bigchaindb-dep.yaml +++ b/k8s/bigchaindb/bigchaindb-dep.yaml @@ -18,7 +18,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: bigchaindb - image: bigchaindb/bigchaindb:0.10.1 + image: bigchaindb/bigchaindb:0.10.2 imagePullPolicy: IfNotPresent args: - start @@ -35,6 +35,10 @@ spec: value: bigchain - name: BIGCHAINDB_SERVER_BIND value: 0.0.0.0:9984 + - name: BIGCHAINDB_WSSERVER_HOST + value: 0.0.0.0 + - name: BIGCHAINDB_WSSERVER_PORT + value: "9985" - name: BIGCHAINDB_KEYPAIR_PUBLIC value: "" - name: BIGCHAINDB_KEYPAIR_PRIVATE @@ -54,7 +58,11 @@ spec: - containerPort: 9984 hostPort: 9984 name: bdb-port - protocol: TCP + protocol: TCP + - containerPort: 9985 + hostPort: 9985 + name: bdb-ws-port + protocol: TCP resources: limits: cpu: 200m diff --git a/k8s/bigchaindb/bigchaindb-svc.yaml b/k8s/bigchaindb/bigchaindb-svc.yaml index 9927a92d..272cb533 100644 --- a/k8s/bigchaindb/bigchaindb-svc.yaml +++ b/k8s/bigchaindb/bigchaindb-svc.yaml @@ -12,5 +12,8 @@ spec: - port: 9984 targetPort: 9984 name: bdb-port + - port: 9985 + targetPort: 9985 + name: bdb-ws-port type: ClusterIP clusterIP: None diff --git a/k8s/mongodb-backup-agent/container/docker_build_and_push.bash b/k8s/mongodb-backup-agent/container/docker_build_and_push.bash index e57e58a1..5d1780ea 100755 --- a/k8s/mongodb-backup-agent/container/docker_build_and_push.bash +++ b/k8s/mongodb-backup-agent/container/docker_build_and_push.bash @@ -1,5 +1,5 @@ #!/bin/bash -docker build -t bigchaindb/mongodb-backup-agent:1.0 . 
+docker build -t bigchaindb/mongodb-backup-agent:2.0 . -docker push bigchaindb/mongodb-backup-agent:1.0 +docker push bigchaindb/mongodb-backup-agent:2.0 diff --git a/k8s/mongodb-backup-agent/container/mongodb_backup_agent_entrypoint.bash b/k8s/mongodb-backup-agent/container/mongodb_backup_agent_entrypoint.bash index ef3895ea..6b663fe9 100755 --- a/k8s/mongodb-backup-agent/container/mongodb_backup_agent_entrypoint.bash +++ b/k8s/mongodb-backup-agent/container/mongodb_backup_agent_entrypoint.bash @@ -5,17 +5,28 @@ set -euo pipefail MONGODB_BACKUP_CONF_FILE=/etc/mongodb-mms/backup-agent.config mms_api_key=`printenv MMS_API_KEY` +ca_crt_path=`printenv CA_CRT_PATH` +backup_crt_path=`printenv BACKUP_PEM_PATH` -if [[ -z "${mms_api_key}" ]]; then +if [[ -z "${mms_api_key}" || \ + -z "${ca_crt_path}" || \ + -z "${backup_crt_path}" ]]; then echo "Invalid environment settings detected. Exiting!" exit 1 fi -sed -i '/mmsApiKey/d' $MONGODB_BACKUP_CONF_FILE -sed -i '/mothership/d' $MONGODB_BACKUP_CONF_FILE +sed -i '/mmsApiKey/d' ${MONGODB_BACKUP_CONF_FILE} +sed -i '/mothership/d' ${MONGODB_BACKUP_CONF_FILE} -echo "mmsApiKey="${mms_api_key} >> $MONGODB_BACKUP_CONF_FILE -echo "mothership=api-backup.eu-west-1.mongodb.com" >> $MONGODB_BACKUP_CONF_FILE +echo "mmsApiKey="${mms_api_key} >> ${MONGODB_BACKUP_CONF_FILE} +echo "mothership=api-backup.eu-west-1.mongodb.com" >> ${MONGODB_BACKUP_CONF_FILE} + +# Append SSL settings to the config file +echo "useSslForAllConnections=true" >> ${MONGODB_BACKUP_CONF_FILE} +echo "sslRequireValidServerCertificates=true" >> ${MONGODB_BACKUP_CONF_FILE} +echo "sslTrustedServerCertificates="${ca_crt_path} >> ${MONGODB_BACKUP_CONF_FILE} +echo "sslClientCertificate="${backup_crt_path} >> ${MONGODB_BACKUP_CONF_FILE} +echo "#sslClientCertificatePassword=" >> ${MONGODB_BACKUP_CONF_FILE} echo "INFO: starting mdb backup..." exec mongodb-mms-backup-agent -c $MONGODB_BACKUP_CONF_FILE diff --git a/k8s/mongodb-monitoring-agent/container/docker_build_and_push.bash b/k8s/mongodb-monitoring-agent/container/docker_build_and_push.bash index d2219b08..caefb6d7 100755 --- a/k8s/mongodb-monitoring-agent/container/docker_build_and_push.bash +++ b/k8s/mongodb-monitoring-agent/container/docker_build_and_push.bash @@ -1,5 +1,5 @@ #!/bin/bash -docker build -t bigchaindb/mongodb-monitoring-agent:1.0 . +docker build -t bigchaindb/mongodb-monitoring-agent:2.0 . -docker push bigchaindb/mongodb-monitoring-agent:1.0 +docker push bigchaindb/mongodb-monitoring-agent:2.0 diff --git a/k8s/mongodb-monitoring-agent/container/mongodb_mon_agent_entrypoint.bash b/k8s/mongodb-monitoring-agent/container/mongodb_mon_agent_entrypoint.bash index 6454c729..9ef96303 100755 --- a/k8s/mongodb-monitoring-agent/container/mongodb_mon_agent_entrypoint.bash +++ b/k8s/mongodb-monitoring-agent/container/mongodb_mon_agent_entrypoint.bash @@ -9,8 +9,12 @@ set -euo pipefail MONGODB_MON_CONF_FILE=/etc/mongodb-mms/monitoring-agent.config mms_api_key=`printenv MMS_API_KEY` +ca_crt_path=`printenv CA_CRT_PATH` +monitoring_crt_path=`printenv MONITORING_PEM_PATH` -if [[ -z "${mms_api_key}" ]]; then +if [[ -z "${mms_api_key}" || \ + -z "${ca_crt_path}" || \ + -z "${monitoring_crt_path}" ]]; then echo "Invalid environment settings detected. Exiting!" 
exit 1 fi @@ -21,7 +25,14 @@ sed -i '/mmsApiKey/d' $MONGODB_MON_CONF_FILE # Append a new line of the form # mmsApiKey=value_of_MMS_API_KEY -echo "mmsApiKey="${mms_api_key} >> $MONGODB_MON_CONF_FILE +echo "mmsApiKey="${mms_api_key} >> ${MONGODB_MON_CONF_FILE} + +# Append SSL settings to the config file +echo "useSslForAllConnections=true" >> ${MONGODB_MON_CONF_FILE} +echo "sslRequireValidServerCertificates=true" >> ${MONGODB_MON_CONF_FILE} +echo "sslTrustedServerCertificates="${ca_crt_path} >> ${MONGODB_MON_CONF_FILE} +echo "sslClientCertificate="${monitoring_crt_path} >> ${MONGODB_MON_CONF_FILE} +echo "#sslClientCertificatePassword=" >> ${MONGODB_MON_CONF_FILE} # start mdb monitoring agent echo "INFO: starting mdb monitor..." diff --git a/k8s/mongodb/container/Dockerfile b/k8s/mongodb/container/Dockerfile index e9667f95..66b076c7 100644 --- a/k8s/mongodb/container/Dockerfile +++ b/k8s/mongodb/container/Dockerfile @@ -1,12 +1,13 @@ -FROM mongo:3.4.3 +FROM mongo:3.4.4 LABEL maintainer "dev@bigchaindb.com" WORKDIR / RUN apt-get update \ && apt-get -y upgrade \ && apt-get autoremove \ - && apt-get clean -COPY mongod.conf.template /etc/mongod.conf.template -COPY mongod_entrypoint/mongod_entrypoint / -VOLUME /data/db /data/configdb + && apt-get clean \ + && mkdir /mongo-ssl +COPY mongod.conf.template /etc/mongod.conf +COPY mongod_entrypoint.bash / +VOLUME /data/db /data/configdb /mongo-ssl EXPOSE 27017 -ENTRYPOINT ["/mongod_entrypoint"] +ENTRYPOINT ["/mongod_entrypoint.bash"] diff --git a/k8s/mongodb/container/Makefile b/k8s/mongodb/container/Makefile deleted file mode 100644 index 0a3779af..00000000 --- a/k8s/mongodb/container/Makefile +++ /dev/null @@ -1,51 +0,0 @@ -# Targets: -# all: Cleans, formats src files, builds the code, builds the docker image -# clean: Removes the binary and docker image -# format: Formats the src files -# build: Builds the code -# docker: Builds the code and docker image -# push: Push the docker image to Docker hub - -GOCMD=go -GOVET=$(GOCMD) tool vet -GOINSTALL=$(GOCMD) install -GOFMT=gofmt -s -w - -DOCKER_IMAGE_NAME?=bigchaindb/mongodb -DOCKER_IMAGE_TAG?=3.4.3 - -PWD=$(shell pwd) -BINARY_PATH=$(PWD)/mongod_entrypoint/ -BINARY_NAME=mongod_entrypoint -MAIN_FILE = $(BINARY_PATH)/mongod_entrypoint.go -SRC_FILES = $(BINARY_PATH)/mongod_entrypoint.go - -.PHONY: all - -all: clean build docker - -clean: - @echo "removing any pre-built binary"; - -@rm $(BINARY_PATH)/$(BINARY_NAME); - @echo "remove any pre-built docker image"; - -@docker rmi $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG); - -format: - $(GOFMT) $(SRC_FILES) - -build: format - $(shell cd $(BINARY_PATH) && \ - export GOPATH="$(BINARY_PATH)" && \ - export GOBIN="$(BINARY_PATH)" && \ - CGO_ENABLED=0 GOOS=linux $(GOINSTALL) -ldflags "-s" -a -installsuffix cgo $(MAIN_FILE)) - -docker: build - docker build \ - -t $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG) .; - -vet: - $(GOVET) . - -push: - docker push \ - $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG); diff --git a/k8s/mongodb/container/README.md b/k8s/mongodb/container/README.md index baad9f13..9f9c46d1 100644 --- a/k8s/mongodb/container/README.md +++ b/k8s/mongodb/container/README.md @@ -2,7 +2,7 @@ ### Need -* MongoDB needs the hostname provided in the rs.initiate() command to be +* MongoDB needs the hostname provided in the `rs.initiate()` command to be resolvable through the hosts file locally. * In the future, with the introduction of TLS for inter-cluster MongoDB communications, we will need a way to specify detailed configuration. 
@@ -11,32 +11,52 @@ ### Step 1: Build the Latest Container -`make` from the root of this project. +`docker build -t bigchaindb/mongodb:3.4.4 .` from the root of this project. ### Step 2: Run the Container ``` docker run \ ---name=mdb1 \ ---publish=: \ ---rm=true \ -bigchaindb/mongodb \ ---replica-set-name \ ---fqdn \ ---port + --cap-add=FOWNER \ + --name=mdb1 \ + --publish=: \ + --rm=true \ + --volume=:/data/db \ + --volume=:/data/configdb \ + --volume=:/mongo-ssl:ro \ + bigchaindb/mongodb:3.4.4 \ + --mongodb-port \ + --mongodb-key-file-path /mongo-ssl/.pem \ + --mongodb-key-file-password \ + --mongodb-ca-file-path /mongo-ssl/.crt \ + --mongodb-crl-file-path /mongo-ssl/.pem \ + --replica-set-name \ + --mongodb-fqdn \ + --mongodb-ip ``` #### Step 3: Initialize the Replica Set Login to one of the MongoDB containers, say mdb1: -`docker exec -it mdb1 bash` +`docker exec -it mongodb bash` + +Since we need TLS certificates to use the mongo shell now, copy them using: + +``` +docker cp bdb-instance-0.pem mongodb:/ +docker cp ca.crt mongodb:/ +``` Start the `mongo` shell: -`mongo --port 27017` - +``` +mongo --host mdb1-fqdn --port mdb1-port --verbose --ssl \ + --sslCAFile /ca.crt \ + --sslPEMKeyFile /bdb-instance-0.pem \ + --sslPEMKeyPassword password +``` Run the rs.initiate() command: ``` diff --git a/k8s/mongodb/container/mongod.conf.template b/k8s/mongodb/container/mongod.conf.template index 28e74acf..5b5f5c1f 100644 --- a/k8s/mongodb/container/mongod.conf.template +++ b/k8s/mongodb/container/mongod.conf.template @@ -6,7 +6,7 @@ # where to write logging data. systemLog: verbosity: 0 - #TODO traceAllExceptions: true + # traceAllExceptions: true timeStampFormat: iso8601-utc component: accessControl: @@ -41,7 +41,7 @@ processManagement: pidFilePath: /tmp/mongod.pid net: - port: PORT + port: MONGODB_PORT bindIp: 0.0.0.0 maxIncomingConnections: 8192 wireObjectCheck: false @@ -53,11 +53,24 @@ net: enabled: false compression: compressors: snappy - #ssl: TODO + ssl: + mode: requireSSL + PEMKeyFile: MONGODB_KEY_FILE_PATH + #PEMKeyPassword: MONGODB_KEY_FILE_PASSWORD + CAFile: MONGODB_CA_FILE_PATH + CRLFile: MONGODB_CRL_FILE_PATH + + #allowConnectionsWithoutCertificates: false + #allowInvalidHostnames: false + #weakCertificateValidation: false + #allowInvalidCertificates: false #security: TODO +# authorization: enabled +# clusterAuthMode: x509 -#setParameter: +setParameter: + enableLocalhostAuthBypass: true #notablescan: 1 TODO #logUserIds: 1 TODO @@ -85,5 +98,3 @@ replication: replSetName: REPLICA_SET_NAME enableMajorityReadConcern: true -#sharding: - diff --git a/k8s/mongodb/container/mongod_entrypoint.bash b/k8s/mongodb/container/mongod_entrypoint.bash new file mode 100755 index 00000000..157c92c9 --- /dev/null +++ b/k8s/mongodb/container/mongod_entrypoint.bash @@ -0,0 +1,91 @@ +#!/bin/bash +set -euo pipefail + +MONGODB_PORT="" +MONGODB_KEY_FILE_PATH="" +#MONGODB_KEY_FILE_PASSWORD="" +MONGODB_CA_FILE_PATH="" +MONGODB_CRL_FILE_PATH="" +REPLICA_SET_NAME="" +MONGODB_FQDN="" +MONGODB_IP="" + +while [[ $# -gt 1 ]]; do + arg="$1" + case $arg in + --mongodb-port) + MONGODB_PORT="$2" + shift + ;; + --mongodb-key-file-path) + MONGODB_KEY_FILE_PATH="$2" + shift + ;; + --mongodb-key-file-password) + # TODO(Krish) move this to a mapped file later + MONGODB_KEY_FILE_PASSWORD="$2" + shift + ;; + --mongodb-ca-file-path) + MONGODB_CA_FILE_PATH="$2" + shift + ;; + --mongodb-crl-file-path) + MONGODB_CRL_FILE_PATH="$2" + shift + ;; + --replica-set-name) + REPLICA_SET_NAME="$2" + shift + ;; + --mongodb-fqdn) + 
MONGODB_FQDN="$2" + shift + ;; + --mongodb-ip) + MONGODB_IP="$2" + shift + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac + shift +done + +# sanity checks +if [[ -z "${REPLICA_SET_NAME}" || \ + -z "${MONGODB_PORT}" || \ + -z "${MONGODB_FQDN}" || \ + -z "${MONGODB_IP}" || \ + -z "${MONGODB_KEY_FILE_PATH}" || \ + -z "${MONGODB_CA_FILE_PATH}" || \ + -z "${MONGODB_CRL_FILE_PATH}" ]] ; then + #-z "${MONGODB_KEY_FILE_PASSWORD}" || \ + echo "Empty parameters detected. Exiting!" + exit 2 +fi + +MONGODB_CONF_FILE_PATH=/etc/mongod.conf +HOSTS_FILE_PATH=/etc/hosts + +# configure the mongod.conf file +sed -i "s|MONGODB_PORT|${MONGODB_PORT}|g" ${MONGODB_CONF_FILE_PATH} +sed -i "s|MONGODB_KEY_FILE_PATH|${MONGODB_KEY_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH} +#sed -i "s|MONGODB_KEY_FILE_PASSWORD|${MONGODB_KEY_FILE_PASSWORD}|g" ${MONGODB_CONF_FILE_PATH} +sed -i "s|MONGODB_CA_FILE_PATH|${MONGODB_CA_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH} +sed -i "s|MONGODB_CRL_FILE_PATH|${MONGODB_CRL_FILE_PATH}|g" ${MONGODB_CONF_FILE_PATH} +sed -i "s|REPLICA_SET_NAME|${REPLICA_SET_NAME}|g" ${MONGODB_CONF_FILE_PATH} + +# add the hostname and ip to hosts file +echo "${MONGODB_IP} ${MONGODB_FQDN}" >> $HOSTS_FILE_PATH + +# start mongod +echo "INFO: starting mongod..." + +# TODO Uncomment the first exec command and use it instead of the second one +# after https://github.com/docker-library/mongo/issues/172 is resolved. Check +# for other bugs too. +#exec /entrypoint.sh mongod --config ${MONGODB_CONF_FILE_PATH} +exec /usr/bin/mongod --config ${MONGODB_CONF_FILE_PATH} diff --git a/k8s/mongodb/container/mongod_entrypoint/mongod_entrypoint.go b/k8s/mongodb/container/mongod_entrypoint/mongod_entrypoint.go deleted file mode 100644 index 57b48974..00000000 --- a/k8s/mongodb/container/mongod_entrypoint/mongod_entrypoint.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "regexp" - "syscall" -) - -const ( - mongoConfFilePath string = "/etc/mongod.conf" - mongoConfTemplateFilePath string = "/etc/mongod.conf.template" - hostsFilePath string = "/etc/hosts" -) - -var ( - // Use the same entrypoint as the mongo:3.4.2 image; just supply it with - // the mongod conf file with custom params - mongoStartCmd []string = []string{"/entrypoint.sh", "mongod", "--config", - mongoConfFilePath} -) - -// context struct stores the user input and the constraints for the specified -// input. It also stores the keyword that needs to be replaced in the template -// files. -type context struct { - cliInput string - templateKeyword string - regex string -} - -// sanity function takes the pre-defined constraints and the user inputs as -// arguments and validates user input based on regex matching -func sanity(input map[string]*context, fqdn, ip string) error { - var format *regexp.Regexp - for _, ctx := range input { - format = regexp.MustCompile(ctx.regex) - if format.MatchString(ctx.cliInput) == false { - return errors.New(fmt.Sprintf( - "Invalid value: '%s' for '%s'. Can be '%s'", - ctx.cliInput, - ctx.templateKeyword, - ctx.regex)) - } - } - - format = regexp.MustCompile(`[a-z0-9-.]+`) - if format.MatchString(fqdn) == false { - return errors.New(fmt.Sprintf( - "Invalid value: '%s' for FQDN. Can be '%s'", - fqdn, - format)) - } - - if net.ParseIP(ip) == nil { - return errors.New(fmt.Sprintf( - "Invalid value: '%s' for IPv4. 
Can be a.b.c.d", - ip)) - } - - return nil -} - -// createFile function takes the pre-defined keywords, user inputs, the -// template file path and the new file path location as parameters, and -// creates a new file at file path with all the keywords replaced by inputs. -func createFile(input map[string]*context, - template string, conf string) error { - // read the template - contents, err := ioutil.ReadFile(template) - if err != nil { - return err - } - // replace - for _, ctx := range input { - contents = bytes.Replace(contents, []byte(ctx.templateKeyword), - []byte(ctx.cliInput), -1) - } - // write - err = ioutil.WriteFile(conf, contents, 0644) - if err != nil { - return err - } - return nil -} - -// updateHostsFile takes the FQDN supplied as input to the container and adds -// an entry to /etc/hosts -func updateHostsFile(ip, fqdn string) error { - fileHandle, err := os.OpenFile(hostsFilePath, os.O_APPEND|os.O_WRONLY, - os.ModeAppend) - if err != nil { - return err - } - defer fileHandle.Close() - // append - _, err = fileHandle.WriteString(fmt.Sprintf("\n%s %s\n", ip, fqdn)) - if err != nil { - return err - } - return nil -} - -func main() { - var fqdn, ip string - input := make(map[string]*context) - - input["replica-set-name"] = &context{} - input["replica-set-name"].regex = `[a-z]+` - input["replica-set-name"].templateKeyword = "REPLICA_SET_NAME" - flag.StringVar(&input["replica-set-name"].cliInput, - "replica-set-name", - "", - "replica set name") - - input["port"] = &context{} - input["port"].regex = `[0-9]{4,5}` - input["port"].templateKeyword = "PORT" - flag.StringVar(&input["port"].cliInput, - "port", - "", - "mongodb port number") - - flag.StringVar(&fqdn, "fqdn", "", "FQDN of the MongoDB instance") - flag.StringVar(&ip, "ip", "", "IPv4 address of the container") - - flag.Parse() - err := sanity(input, fqdn, ip) - if err != nil { - log.Fatal(err) - } - - err = createFile(input, mongoConfTemplateFilePath, mongoConfFilePath) - if err != nil { - log.Fatal(err) - } - - err = updateHostsFile(ip, fqdn) - if err != nil { - log.Fatal(err) - } - - fmt.Printf("Starting Mongod....") - err = syscall.Exec(mongoStartCmd[0], mongoStartCmd[0:], os.Environ()) - if err != nil { - panic(err) - } -} diff --git a/k8s/nginx/nginx-dep.yaml b/k8s/nginx/nginx-dep.yaml index 684ae552..0aad0b2d 100644 --- a/k8s/nginx/nginx-dep.yaml +++ b/k8s/nginx/nginx-dep.yaml @@ -43,15 +43,23 @@ spec: configMapKeyRef: name: mongodb-whitelist key: allowed-hosts + - name: BIGCHAINDB_WS_FRONTEND_PORT + value: "81" + - name: BIGCHAINDB_WS_BACKEND_PORT + value: "9985" ports: - containerPort: 27017 hostPort: 27017 name: public-mdb-port - protocol: TCP + protocol: TCP - containerPort: 80 hostPort: 80 name: public-bdb-port - protocol: TCP + protocol: TCP + - containerPort: 81 + hostPort: 81 + name: public-ws-port + protocol: TCP resources: limits: cpu: 200m diff --git a/k8s/nginx/nginx-svc.yaml b/k8s/nginx/nginx-svc.yaml index 8b0cded4..b9d8bcaf 100644 --- a/k8s/nginx/nginx-svc.yaml +++ b/k8s/nginx/nginx-svc.yaml @@ -21,4 +21,8 @@ spec: targetPort: 80 name: ngx-public-bdb-port protocol: TCP + - port: 81 + targetPort: 81 + name: ngx-public-ws-port + protocol: TCP type: LoadBalancer diff --git a/k8s/toolbox/Dockerfile b/k8s/toolbox/Dockerfile index c9adfb5e..2beeb9d1 100644 --- a/k8s/toolbox/Dockerfile +++ b/k8s/toolbox/Dockerfile @@ -7,9 +7,10 @@ FROM alpine:3.5 LABEL maintainer "dev@bigchaindb.com" WORKDIR / RUN apk add --no-cache --update curl bind-tools python3-dev g++ \ - libffi-dev make vim git \ + libffi-dev make vim 
git nodejs \ && pip3 install ipython \ && git clone https://github.com/bigchaindb/bigchaindb-driver \ && cd bigchaindb-driver \ - && pip3 install -e . + && pip3 install -e . \ + && npm install -g wsc ENTRYPOINT ["/bin/sh"] diff --git a/setup.py b/setup.py index 179b561f..55543ea3 100644 --- a/setup.py +++ b/setup.py @@ -71,6 +71,7 @@ install_requires = [ 'python-rapidjson==0.0.11', 'logstats>=0.2.1', 'flask>=0.10.1', + 'flask-cors~=3.0.0', 'flask-restful~=0.3.0', 'requests~=2.9', 'gunicorn~=19.0', diff --git a/tests/README.md b/tests/README.md index d0e2da52..e6de82c9 100644 --- a/tests/README.md +++ b/tests/README.md @@ -27,8 +27,7 @@ BigchainDB from source. The [`CONTRIBUTING.md` file](../CONTRIBUTING.md) has instructions for how to do that. Next, make sure you have RethinkDB or MongoDB running in the background. You -can run RethinkDB using `rethinkdb --daemon` or MongoDB using `mongod ---replSet=rs0`. +can run RethinkDB using `rethinkdb --daemon` or MongoDB using `mongod --replSet=bigchain-rs`. The `pytest` command has many options. If you want to learn about all the things you can do with pytest, see [the pytest diff --git a/tests/backend/mongodb/test_queries.py b/tests/backend/mongodb/test_queries.py index c43c5fa4..c3792064 100644 --- a/tests/backend/mongodb/test_queries.py +++ b/tests/backend/mongodb/test_queries.py @@ -208,10 +208,10 @@ def test_get_owned_ids(signed_create_tx, user_pk): block = Block(transactions=[signed_create_tx]) conn.db.bigchain.insert_one(block.to_dict()) - owned_ids = list(query.get_owned_ids(conn, user_pk)) + [(block_id, tx)] = list(query.get_owned_ids(conn, user_pk)) - assert len(owned_ids) == 1 - assert owned_ids[0] == signed_create_tx.to_dict() + assert block_id == block.id + assert tx == signed_create_tx.to_dict() def test_get_votes_by_block_id(signed_create_tx, structurally_valid_vote): @@ -423,6 +423,55 @@ def test_get_txids_filtered(signed_create_tx, signed_transfer_tx): assert txids == {signed_transfer_tx.id} +def test_get_spending_transactions(user_pk): + from bigchaindb.backend import connect, query + from bigchaindb.models import Block, Transaction + conn = connect() + + out = [([user_pk], 1)] + tx1 = Transaction.create([user_pk], out * 3) + inputs = tx1.to_inputs() + tx2 = Transaction.transfer([inputs[0]], out, tx1.id) + tx3 = Transaction.transfer([inputs[1]], out, tx1.id) + tx4 = Transaction.transfer([inputs[2]], out, tx1.id) + block = Block([tx1, tx2, tx3, tx4]) + conn.db.bigchain.insert_one(block.to_dict()) + + links = [inputs[0].fulfills.to_dict(), inputs[2].fulfills.to_dict()] + res = list(query.get_spending_transactions(conn, links)) + + # tx3 not a member because input 1 not asked for + assert res == [(block.id, tx2.to_dict()), (block.id, tx4.to_dict())] + + +def test_get_votes_for_blocks_by_voter(): + from bigchaindb.backend import connect, query + + conn = connect() + votes = [ + { + 'node_pubkey': 'a', + 'vote': {'voting_for_block': 'block1'}, + }, + { + 'node_pubkey': 'b', + 'vote': {'voting_for_block': 'block1'}, + }, + { + 'node_pubkey': 'a', + 'vote': {'voting_for_block': 'block2'}, + }, + { + 'node_pubkey': 'a', + 'vote': {'voting_for_block': 'block3'}, + } + ] + for vote in votes: + conn.db.votes.insert_one(vote.copy()) + res = query.get_votes_for_blocks_by_voter(conn, ['block1', 'block2'], 'a') + assert list(res) == [votes[0], votes[2]] + + def test_write_assets(): from bigchaindb.backend import connect, query conn = connect() diff --git a/tests/backend/test_generics.py b/tests/backend/test_generics.py index 6a1e9447..4a7cd5ec 
100644 --- a/tests/backend/test_generics.py +++ b/tests/backend/test_generics.py @@ -36,6 +36,8 @@ def test_schema(schema_func_name, args_qty): ('get_votes_by_block_id_and_voter', 2), ('update_transaction', 2), ('get_transaction_from_block', 2), + ('get_votes_for_blocks_by_voter', 2), + ('get_spending_transactions', 1), ('write_assets', 1), ('get_assets', 1), )) diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 37079ddd..b50a2a67 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -88,11 +88,12 @@ def test_bigchain_show_config(capsys): assert output_config == config +@pytest.mark.usefixtures('ignore_local_config_file') def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): from bigchaindb import config from bigchaindb.commands.bigchaindb import run_export_my_pubkey - args = Namespace(config='dummy') + args = Namespace(config=None) # so in run_export_my_pubkey(args) below, # filename=args.config='dummy' is passed to autoconfigure(). # We just assume autoconfigure() works and sets @@ -107,11 +108,12 @@ def test_bigchain_export_my_pubkey_when_pubkey_set(capsys, monkeypatch): assert 'Charlie_Bucket' in lines +@pytest.mark.usefixtures('ignore_local_config_file') def test_bigchain_export_my_pubkey_when_pubkey_not_set(monkeypatch): from bigchaindb import config from bigchaindb.commands.bigchaindb import run_export_my_pubkey - args = Namespace(config='dummy') + args = Namespace(config=None) monkeypatch.setitem(config['keypair'], 'public', None) # assert that run_export_my_pubkey(args) raises SystemExit: with pytest.raises(SystemExit) as exc_info: diff --git a/tests/conftest.py b/tests/conftest.py index 26beac11..d60b4511 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -199,7 +199,7 @@ def _genesis(_bdb, genesis_block): @pytest.fixture def ignore_local_config_file(monkeypatch): def mock_file_config(filename=None): - raise FileNotFoundError() + return {} monkeypatch.setattr('bigchaindb.config_utils.file_config', mock_file_config) diff --git a/tests/db/test_bigchain_api.py b/tests/db/test_bigchain_api.py index 5960f171..0371ecbf 100644 --- a/tests/db/test_bigchain_api.py +++ b/tests/db/test_bigchain_api.py @@ -1119,11 +1119,11 @@ def test_get_owned_ids_calls_get_outputs_filtered(): def test_get_outputs_filtered_only_unspent(): from bigchaindb.common.transaction import TransactionLink from bigchaindb.core import Bigchain - with patch('bigchaindb.core.Bigchain.get_outputs') as get_outputs: + with patch('bigchaindb.fastquery.FastQuery.get_outputs_by_public_key') as get_outputs: get_outputs.return_value = [TransactionLink('a', 1), TransactionLink('b', 2)] - with patch('bigchaindb.core.Bigchain.get_spent') as get_spent: - get_spent.side_effect = [True, False] + with patch('bigchaindb.fastquery.FastQuery.filter_spent_outputs') as filter_spent: + filter_spent.return_value = [TransactionLink('b', 2)] out = Bigchain().get_outputs_filtered('abc', include_spent=False) get_outputs.assert_called_once_with('abc') assert out == [TransactionLink('b', 2)] @@ -1132,13 +1132,13 @@ def test_get_outputs_filtered_only_unspent(): def test_get_outputs_filtered(): from bigchaindb.common.transaction import TransactionLink from bigchaindb.core import Bigchain - with patch('bigchaindb.core.Bigchain.get_outputs') as get_outputs: + with patch('bigchaindb.fastquery.FastQuery.get_outputs_by_public_key') as get_outputs: get_outputs.return_value = [TransactionLink('a', 1), TransactionLink('b', 2)] - with 
patch('bigchaindb.core.Bigchain.get_spent') as get_spent: + with patch('bigchaindb.fastquery.FastQuery.filter_spent_outputs') as filter_spent: out = Bigchain().get_outputs_filtered('abc') get_outputs.assert_called_once_with('abc') - get_spent.assert_not_called() + filter_spent.assert_not_called() assert out == get_outputs.return_value diff --git a/tests/test_config_utils.py b/tests/test_config_utils.py index bb445d83..88c3431e 100644 --- a/tests/test_config_utils.py +++ b/tests/test_config_utils.py @@ -257,6 +257,18 @@ def test_autoconfigure_env_precedence(monkeypatch): assert bigchaindb.config['server']['bind'] == 'localhost:9985' +def test_autoconfigure_explicit_file(monkeypatch): + from bigchaindb import config_utils + + def file_config(*args, **kwargs): + raise FileNotFoundError() + + monkeypatch.setattr('bigchaindb.config_utils.file_config', file_config) + + with pytest.raises(FileNotFoundError): + config_utils.autoconfigure(filename='autoexec.bat') + + def test_update_config(monkeypatch): import bigchaindb from bigchaindb import config_utils diff --git a/tests/test_fastquery.py b/tests/test_fastquery.py new file mode 100644 index 00000000..8fb3378c --- /dev/null +++ b/tests/test_fastquery.py @@ -0,0 +1,86 @@ +import pytest + +from bigchaindb.common.transaction import TransactionLink +from bigchaindb.models import Block, Transaction + +pytestmark = pytest.mark.bdb + + +@pytest.fixture +def blockdata(b, user_pk, user2_pk): + txs = [Transaction.create([user_pk], [([user2_pk], 1)]), + Transaction.create([user2_pk], [([user_pk], 1)]), + Transaction.create([user_pk], [([user_pk], 1), ([user2_pk], 1)])] + blocks = [] + for i in range(3): + block = Block([txs[i]]) + b.write_block(block) + blocks.append(block.to_dict()) + b.write_vote(b.vote(blocks[1]['id'], '', True)) + b.write_vote(b.vote(blocks[2]['id'], '', False)) + return blocks, [b['id'] for b in blocks] + + +def test_filter_valid_block_ids_with_undecided(b, blockdata): + blocks, block_ids = blockdata + valid_ids = b.fastquery.filter_valid_block_ids(block_ids, include_undecided=True) + assert set(valid_ids) == {blocks[0]['id'], blocks[1]['id']} + + +def test_filter_valid_block_ids_only_valid(b, blockdata): + blocks, block_ids = blockdata + valid_ids = b.fastquery.filter_valid_block_ids(block_ids) + assert set(valid_ids) == {blocks[1]['id']} + + +def test_filter_valid_items(b, blockdata): + blocks, _ = blockdata + assert (b.fastquery.filter_valid_items(blocks, block_id_key=lambda b: b['id']) + == [blocks[0], blocks[1]]) + + +def test_get_outputs_by_public_key(b, user_pk, user2_pk, blockdata): + blocks, _ = blockdata + assert b.fastquery.get_outputs_by_public_key(user_pk) == [ + TransactionLink(blocks[1]['block']['transactions'][0]['id'], 0) + ] + assert b.fastquery.get_outputs_by_public_key(user2_pk) == [ + TransactionLink(blocks[0]['block']['transactions'][0]['id'], 0) + ] + + +def test_filter_spent_outputs(b, user_pk): + out = [([user_pk], 1)] + tx1 = Transaction.create([user_pk], out * 3) + + # There are 3 inputs + inputs = tx1.to_inputs() + + # Each spent individually + tx2 = Transaction.transfer([inputs[0]], out, tx1.id) + tx3 = Transaction.transfer([inputs[1]], out, tx1.id) + tx4 = Transaction.transfer([inputs[2]], out, tx1.id) + + # The CREATE and first TRANSFER are valid. tx2 produces a new unspent. + for tx in [tx1, tx2]: + block = Block([tx]) + b.write_block(block) + b.write_vote(b.vote(block.id, '', True)) + + # The second TRANSFER is invalid. inputs[1] remains unspent. 
+    block = Block([tx3])
+    b.write_block(block)
+    b.write_vote(b.vote(block.id, '', False))
+
+    # The third TRANSFER is undecided. It produces a new unspent.
+    block = Block([tx4])
+    b.write_block(block)
+
+    outputs = b.fastquery.get_outputs_by_public_key(user_pk)
+    unspents = b.fastquery.filter_spent_outputs(outputs)
+
+    assert set(unspents) == {
+        inputs[1].fulfills,
+        tx2.to_inputs()[0].fulfills,
+        tx4.to_inputs()[0].fulfills
+    }
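The behaviour these new tests pin down surfaces through `Bigchain.get_outputs_filtered`, which (per the patched `tests/db/test_bigchain_api.py`) now delegates to `FastQuery.get_outputs_by_public_key` and `FastQuery.filter_spent_outputs`. Below is a short usage sketch consistent with those tests; the public key is a placeholder and a configured node with a backend connection is assumed.

```python
from bigchaindb.core import Bigchain

b = Bigchain()  # assumes a configured node with a backend connection
pubkey = '<base58-encoded public key>'  # placeholder

# Outputs locked to pubkey in VALID or UNDECIDED blocks, as TransactionLinks.
all_outputs = b.get_outputs_filtered(pubkey)

# Additionally drop outputs spent by a transaction in a VALID or UNDECIDED
# block; spends that only appear in INVALID blocks are ignored, as encoded
# in test_filter_spent_outputs above.
unspent_outputs = b.get_outputs_filtered(pubkey, include_spent=False)
```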