Compare commits

...

22 Commits

Author SHA1 Message Date
Tomoaki Imai d95b53862f arg explanation update for _currentRoot for updateDepositTree 2022-03-08 11:18:53 +07:00
Tomoaki Imai f6bfbf09be arg explanation update for _currentRoot 2022-03-08 11:18:53 +07:00
poma 17fc574af3 docs 2021-03-25 17:44:52 +03:00
poma a18cc18e91 update version 2021-03-22 20:28:56 +03:00
poma 790f049719 fix CVF-7 2021-03-22 20:25:42 +03:00
poma 53e7a69556 fix CVF-21 2021-03-22 20:16:14 +03:00
poma 8a82afba42 fix CVF-26 2021-03-22 20:12:50 +03:00
poma a04fe966f7 change index logic, fixes CVF-38. Update circom version. 2021-03-21 02:26:54 +03:00
poma 9321740be7 add events 2021-03-21 00:45:14 +03:00
poma a3da216e03 docs 2021-03-20 23:50:59 +03:00
poma 87ce87532e docs 2021-03-20 23:48:26 +03:00
poma a8b93f6d5b lint 2021-03-20 14:38:28 +03:00
poma 7d076e8af1 fix CVF-36 2021-03-20 14:07:22 +03:00
poma 0895afbc30 rename utils to TreeUpdateArgsHasher 2021-03-20 14:07:18 +03:00
poma e8954ec6ce add nItems var, fixes CVF-30 2021-03-19 23:04:43 +03:00
poma 73b9369abc remove redundant check, fixes CVF-18, CVF-19 2021-03-19 22:15:58 +03:00
poma 628d3bd64a add explicit _admin arg 2021-03-19 21:52:00 +03:00
poma b8c403b435 bump version 2021-03-16 22:42:22 +03:00
Alexey 44b3db0ce4 Merge branch 'master' of github.com:tornadocash/tornado-trees 2021-03-15 00:53:48 +04:00
poma 5492acd75c lint 2021-03-06 15:21:44 +03:00
poma 55937915c4 readme 2021-03-06 15:08:11 +03:00
poma 272c7a94e0 make findArrayLength internal 2021-03-06 15:05:54 +03:00
10 changed files with 278 additions and 182 deletions

View File

@@ -24,14 +24,10 @@ $ npx hardhat node --fork https://eth-mainnet.alchemyapi.io/v2/API_KEY --fork-
$ npx hardhat test
```
## Checklist for batch size changing
find and replace the `CHUNK_TREE_HEIGHT = ` value in the following files (a consistency-check sketch follows this diff)
1. `circuits/BatchTreeUpdate.circom`
2. `contracts/TornadoTrees.sol`
3. `tornadoTrees.test.js`
## build large circuits
1. docker build . -t tornadocash/tornado-trees
Make sure you have enough RAM
```bash
docker build . -t tornadocash/tornado-trees
```
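
The checklist above is easy to miss a file on. A minimal Node.js sketch of a consistency check (a hypothetical helper, not part of this diff; the test file's directory is an assumption):

```js
// check-chunk-height.js — hypothetical guard, not part of the repo.
// Verifies the files from the checklist declare the same CHUNK_TREE_HEIGHT.
const fs = require('fs')

const files = [
  'circuits/BatchTreeUpdate.circom',
  'contracts/TornadoTrees.sol',
  'test/tornadoTrees.test.js', // directory assumed; the checklist names only the file
]

const values = files.map((path) => {
  const match = fs.readFileSync(path, 'utf8').match(/CHUNK_TREE_HEIGHT\s*=\s*(\d+)/)
  if (!match) throw new Error(`no CHUNK_TREE_HEIGHT in ${path}`)
  return { path, value: Number(match[1]) }
})

if (new Set(values.map((v) => v.value)).size !== 1) {
  console.error('CHUNK_TREE_HEIGHT mismatch:', values)
  process.exit(1)
}
console.log(`CHUNK_TREE_HEIGHT = ${values[0].value} in all ${files.length} files`)
```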

View File

@@ -1,14 +1,16 @@
include "../node_modules/circomlib/circuits/poseidon.circom";
include "../node_modules/circomlib/circuits/bitify.circom";
include "./MerkleTreeUpdater.circom";
include "./Utils.circom";
include "./TreeUpdateArgsHasher.circom";
// Computes hashes of the next tree layer
template TreeLayer(height) {
signal input ins[1 << (height + 1)];
signal output outs[1 << height];
var nItems = 1 << height;
signal input ins[nItems * 2];
signal output outs[nItems];
component hash[1 << height];
for(var i = 0; i < (1 << height); i++) {
component hash[nItems];
for(var i = 0; i < nItems; i++) {
hash[i] = HashLeftRight();
hash[i].left <== ins[i * 2];
hash[i].right <== ins[i * 2 + 1];
@@ -18,6 +20,8 @@ template TreeLayer(height) {
// Inserts a leaf batch into a tree
// Checks that tree previously contained zero leaves in the same position
// Hashes leaves with Poseidon hash
// `batchLevels` should be less than `levels`
template BatchTreeUpdate(levels, batchLevels, zeroBatchLeaf) {
var height = levels - batchLevels;
var nLeaves = 1 << batchLevels;
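
The refactor above (CVF-30) only introduces the `nItems` variable; behavior is unchanged. For intuition, one `TreeLayer(height)` reduces a layer of `2^(height+1)` nodes to `2^height` parents, as in this JavaScript sketch, where `hash(left, right)` stands in for the circuit's Poseidon-based `HashLeftRight`:

```js
// One Merkle tree layer: hash adjacent pairs to produce the layer above.
// `hash(left, right)` is a placeholder for a Poseidon left/right hash.
function treeLayer(ins, hash) {
  const nItems = ins.length / 2 // == 1 << height
  const outs = new Array(nItems)
  for (let i = 0; i < nItems; i++) {
    outs[i] = hash(ins[i * 2], ins[i * 2 + 1])
  }
  return outs
}
```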

View File

@@ -1,17 +1,19 @@
include "../node_modules/circomlib/circuits/bitify.circom";
include "../node_modules/circomlib/circuits/sha256/sha256.circom";
// Computes a SHA256 hash of all inputs packed into a byte array
// Field elements are padded to 256 bits with zeroes
template TreeUpdateArgsHasher(nLeaves) {
signal private input oldRoot;
signal private input newRoot;
signal private input pathIndices;
signal private input instances[nLeaves];
signal private input hashes[nLeaves];
signal private input blocks[nLeaves];
signal input oldRoot;
signal input newRoot;
signal input pathIndices;
signal input instances[nLeaves];
signal input hashes[nLeaves];
signal input blocks[nLeaves];
signal output out;
var header = 256 + 256 + 32;
var bitsPerLeaf = 160 + 256 + 32;
var bitsPerLeaf = 256 + 160 + 32;
component hasher = Sha256(header + nLeaves * bitsPerLeaf);
// the range check on old root is optional, it's enforced by the smart contract anyway
@@ -26,18 +28,20 @@ template TreeUpdateArgsHasher(nLeaves) {
bitsNewRoot.in <== newRoot;
bitsPathIndices.in <== pathIndices;
hasher.in[0] <== 0;
hasher.in[1] <== 0;
var index = 0;
hasher.in[index++] <== 0;
hasher.in[index++] <== 0;
for(var i = 0; i < 254; i++) {
hasher.in[i + 2] <== bitsOldRoot.out[253 - i];
hasher.in[index++] <== bitsOldRoot.out[253 - i];
}
hasher.in[256] <== 0;
hasher.in[257] <== 0;
hasher.in[index++] <== 0;
hasher.in[index++] <== 0;
for(var i = 0; i < 254; i++) {
hasher.in[i + 258] <== bitsNewRoot.out[253 - i];
hasher.in[index++] <== bitsNewRoot.out[253 - i];
}
for(var i = 0; i < 32; i++) {
hasher.in[i + 512] <== bitsPathIndices.out[31 - i];
hasher.in[index++] <== bitsPathIndices.out[31 - i];
}
for(var leaf = 0; leaf < nLeaves; leaf++) {
// the range check on hash is optional, it's enforced by the smart contract anyway
@@ -47,16 +51,16 @@ template TreeUpdateArgsHasher(nLeaves) {
bitsHash[leaf].in <== hashes[leaf];
bitsInstance[leaf].in <== instances[leaf];
bitsBlock[leaf].in <== blocks[leaf];
hasher.in[header + leaf * bitsPerLeaf + 0] <== 0;
hasher.in[header + leaf * bitsPerLeaf + 1] <== 0;
hasher.in[index++] <== 0;
hasher.in[index++] <== 0;
for(var i = 0; i < 254; i++) {
hasher.in[header + leaf * bitsPerLeaf + i + 2] <== bitsHash[leaf].out[253 - i];
hasher.in[index++] <== bitsHash[leaf].out[253 - i];
}
for(var i = 0; i < 160; i++) {
hasher.in[header + leaf * bitsPerLeaf + i + 256] <== bitsInstance[leaf].out[159 - i];
hasher.in[index++] <== bitsInstance[leaf].out[159 - i];
}
for(var i = 0; i < 32; i++) {
hasher.in[header + leaf * bitsPerLeaf + i + 416] <== bitsBlock[leaf].out[31 - i];
hasher.in[index++] <== bitsBlock[leaf].out[31 - i];
}
}
component b2n = Bits2Num(256);
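
The header/leaf widths above must line up with the byte layout the contract hashes. A quick arithmetic check in JavaScript, assuming `CHUNK_TREE_HEIGHT = 8` and `BYTES_SIZE = 32 + 32 + 4 + CHUNK_SIZE * ITEM_SIZE` (the formula implied by the contract's constants):

```js
const nLeaves = 1 << 8             // CHUNK_SIZE for CHUNK_TREE_HEIGHT = 8

// Circuit side, in bits: each field element is wired as 2 zero bits + 254 bits.
const header = 256 + 256 + 32      // oldRoot | newRoot | pathIndices
const bitsPerLeaf = 256 + 160 + 32 // hash | instance (address) | block
const totalBits = header + nLeaves * bitsPerLeaf

// Contract side, in bytes.
const ITEM_SIZE = 32 + 20 + 4
const BYTES_SIZE = 32 + 32 + 4 + nLeaves * ITEM_SIZE

console.log(totalBits)     // 115232
console.log(totalBits / 8) // 14404
console.log(BYTES_SIZE)    // 14404 — the two layouts agree
```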

View File

@@ -10,7 +10,11 @@ contract AdminUpgradeableProxy is TransparentUpgradeableProxy {
/**
* @dev Initializes an upgradeable proxy backed by the implementation at `_logic`.
*/
constructor(address _logic, bytes memory _data) public payable TransparentUpgradeableProxy(_logic, msg.sender, _data) {}
constructor(
address _logic,
address _admin,
bytes memory _data
) public payable TransparentUpgradeableProxy(_logic, _admin, _data) {}
/**
* @dev Override to allow admin access the fallback function.

View File

@@ -7,6 +7,7 @@ import "./interfaces/ITornadoTreesV1.sol";
import "./interfaces/IBatchTreeUpdateVerifier.sol";
import "@openzeppelin/upgrades-core/contracts/Initializable.sol";
/// @dev This contract holds a merkle tree of all tornado cash deposit and withdrawal events
contract TornadoTrees is Initializable {
address public immutable governance;
bytes32 public depositRoot;
@@ -17,7 +18,6 @@ contract TornadoTrees is Initializable {
IBatchTreeUpdateVerifier public treeUpdateVerifier;
ITornadoTreesV1 public immutable tornadoTreesV1;
// make sure CHUNK_TREE_HEIGHT has the same value in BatchTreeUpdate.circom
uint256 public constant CHUNK_TREE_HEIGHT = 8;
uint256 public constant CHUNK_SIZE = 2**CHUNK_TREE_HEIGHT;
uint256 public constant ITEM_SIZE = 32 + 20 + 4;
@@ -36,6 +36,8 @@
event DepositData(address instance, bytes32 indexed hash, uint256 block, uint256 index);
event WithdrawalData(address instance, bytes32 indexed hash, uint256 block, uint256 index);
event VerifierUpdated(address newVerifier);
event ProxyUpdated(address newProxy);
struct TreeLeaf {
bytes32 hash;
@@ -100,18 +102,140 @@ contract TornadoTrees is Initializable {
withdrawalsLength = withdrawalsV1Length;
}
// todo make things internal
/// @dev Queue a new deposit data to be inserted into a merkle tree
function registerDeposit(address _instance, bytes32 _commitment) public onlyTornadoProxy {
uint256 _depositsLength = depositsLength;
deposits[_depositsLength] = keccak256(abi.encode(_instance, _commitment, blockNumber()));
emit DepositData(_instance, _commitment, blockNumber(), _depositsLength);
depositsLength = _depositsLength + 1;
}
/// @dev Queue a new withdrawal data to be inserted into a merkle tree
function registerWithdrawal(address _instance, bytes32 _nullifierHash) public onlyTornadoProxy {
uint256 _withdrawalsLength = withdrawalsLength;
withdrawals[_withdrawalsLength] = keccak256(abi.encode(_instance, _nullifierHash, blockNumber()));
emit WithdrawalData(_instance, _nullifierHash, blockNumber(), _withdrawalsLength);
withdrawalsLength = _withdrawalsLength + 1;
}
/// @dev Insert a full batch of queued deposits into a merkle tree
/// @param _proof A snark proof that elements were inserted correctly
/// @param _argsHash A hash of snark inputs
/// @param _currentRoot Current merkle tree root
/// @param _newRoot Updated merkle tree root
/// @param _pathIndices Merkle path to inserted batch
/// @param _events A batch of inserted events (leaves)
function updateDepositTree(
bytes calldata _proof,
bytes32 _argsHash,
bytes32 _currentRoot,
bytes32 _newRoot,
uint32 _pathIndices,
TreeLeaf[CHUNK_SIZE] calldata _events
) public {
uint256 offset = lastProcessedDepositLeaf;
require(_currentRoot == depositRoot, "Proposed deposit root is invalid");
require(_pathIndices == offset >> CHUNK_TREE_HEIGHT, "Incorrect deposit insert index");
bytes memory data = new bytes(BYTES_SIZE);
assembly {
mstore(add(data, 0x44), _pathIndices)
mstore(add(data, 0x40), _newRoot)
mstore(add(data, 0x20), _currentRoot)
}
for (uint256 i = 0; i < CHUNK_SIZE; i++) {
(bytes32 hash, address instance, uint32 blockNumber) = (_events[i].hash, _events[i].instance, _events[i].block);
bytes32 leafHash = keccak256(abi.encode(instance, hash, blockNumber));
bytes32 deposit = offset + i >= depositsV1Length ? deposits[offset + i] : tornadoTreesV1.deposits(offset + i);
require(leafHash == deposit, "Incorrect deposit");
assembly {
let itemOffset := add(data, mul(ITEM_SIZE, i))
mstore(add(itemOffset, 0x7c), blockNumber)
mstore(add(itemOffset, 0x78), instance)
mstore(add(itemOffset, 0x64), hash)
}
if (offset + i >= depositsV1Length) {
delete deposits[offset + i];
} else {
emit DepositData(instance, hash, blockNumber, offset + i);
}
}
uint256 argsHash = uint256(sha256(data)) % SNARK_FIELD;
require(argsHash == uint256(_argsHash), "Invalid args hash");
require(treeUpdateVerifier.verifyProof(_proof, [argsHash]), "Invalid deposit tree update proof");
previousDepositRoot = _currentRoot;
depositRoot = _newRoot;
lastProcessedDepositLeaf = offset + CHUNK_SIZE;
}
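
The assembly above packs `data` so that `sha256(data)` reproduces the circuit's args hash: `add(data, 0x20)` skips the `bytes` length word, the 68-byte header holds `currentRoot | newRoot | pathIndices`, and each 56-byte item holds `hash | instance | block` (later `mstore`s deliberately overwrite the zero padding of earlier ones). A hedged off-chain equivalent in Node.js (the function name and the hex-string event shape are illustrative):

```js
const crypto = require('crypto')

// BN254 group order — the SNARK_FIELD the contract reduces by.
const SNARK_FIELD = BigInt(
  '21888242871839275222246405745257275088548364400416034343698204186575808495617',
)

// currentRoot (32) | newRoot (32) | pathIndices (4), then per event:
// hash (32) | instance (20) | block (4) — mirroring the assembly layout.
function hashInputs(currentRoot, newRoot, pathIndices, events) {
  const chunks = [
    Buffer.from(currentRoot.slice(2), 'hex'),
    Buffer.from(newRoot.slice(2), 'hex'),
    intToBuffer(pathIndices, 4),
  ]
  for (const e of events) {
    chunks.push(Buffer.from(e.hash.slice(2), 'hex'))
    chunks.push(Buffer.from(e.instance.slice(2), 'hex'))
    chunks.push(intToBuffer(e.block, 4))
  }
  const digest = crypto.createHash('sha256').update(Buffer.concat(chunks)).digest('hex')
  return BigInt('0x' + digest) % SNARK_FIELD // matches uint256(sha256(data)) % SNARK_FIELD
}

function intToBuffer(n, bytes) {
  return Buffer.from(n.toString(16).padStart(bytes * 2, '0'), 'hex')
}
```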
/// @dev Insert a full batch of queued withdrawals into a merkle tree
/// @param _proof A snark proof that elements were inserted correctly
/// @param _argsHash A hash of snark inputs
/// @param _currentRoot Current merkle tree root
/// @param _newRoot Updated merkle tree root
/// @param _pathIndices Merkle path to inserted batch
/// @param _events A batch of inserted events (leaves)
function updateWithdrawalTree(
bytes calldata _proof,
bytes32 _argsHash,
bytes32 _currentRoot,
bytes32 _newRoot,
uint32 _pathIndices,
TreeLeaf[CHUNK_SIZE] calldata _events
) public {
uint256 offset = lastProcessedWithdrawalLeaf;
require(_currentRoot == withdrawalRoot, "Proposed withdrawal root is invalid");
require(_pathIndices == offset >> CHUNK_TREE_HEIGHT, "Incorrect withdrawal insert index");
bytes memory data = new bytes(BYTES_SIZE);
assembly {
mstore(add(data, 0x44), _pathIndices)
mstore(add(data, 0x40), _newRoot)
mstore(add(data, 0x20), _currentRoot)
}
for (uint256 i = 0; i < CHUNK_SIZE; i++) {
(bytes32 hash, address instance, uint32 blockNumber) = (_events[i].hash, _events[i].instance, _events[i].block);
bytes32 leafHash = keccak256(abi.encode(instance, hash, blockNumber));
bytes32 withdrawal = offset + i >= withdrawalsV1Length ? withdrawals[offset + i] : tornadoTreesV1.withdrawals(offset + i);
require(leafHash == withdrawal, "Incorrect withdrawal");
assembly {
let itemOffset := add(data, mul(ITEM_SIZE, i))
mstore(add(itemOffset, 0x7c), blockNumber)
mstore(add(itemOffset, 0x78), instance)
mstore(add(itemOffset, 0x64), hash)
}
if (offset + i >= withdrawalsV1Length) {
delete withdrawals[offset + i];
} else {
emit WithdrawalData(instance, hash, blockNumber, offset + i);
}
}
uint256 argsHash = uint256(sha256(data)) % SNARK_FIELD;
require(argsHash == uint256(_argsHash), "Invalid args hash");
require(treeUpdateVerifier.verifyProof(_proof, [argsHash]), "Invalid withdrawal tree update proof");
previousWithdrawalRoot = _currentRoot;
withdrawalRoot = _newRoot;
lastProcessedWithdrawalLeaf = offset + CHUNK_SIZE;
}
function validateRoots(bytes32 _depositRoot, bytes32 _withdrawalRoot) public view {
require(_depositRoot == depositRoot || _depositRoot == previousDepositRoot, "Incorrect deposit tree root");
require(_withdrawalRoot == withdrawalRoot || _withdrawalRoot == previousWithdrawalRoot, "Incorrect withdrawal tree root");
}
/// @dev There is no array length getter for deposit and withdrawal arrays
/// in previous contract, so we have to find them length manually
/// in the previous contract, so we have to find their length manually.
/// Used only during deployment
function findArrayLength(
ITornadoTreesV1 _tornadoTreesV1,
string memory _type,
uint256 _from, // most likely array length after the proposal has passed
uint256 _step // optimal step size to find first match, approximately equals dispersion
) public view returns (uint256) {
if (_from == 0 && _step == 0) {
return 0; // for tests
}
) internal view virtual returns (uint256) {
// Find the segment with correct array length
bool direction = elementExists(_tornadoTreesV1, _type, _from);
do {
@@ -142,137 +266,14 @@ contract TornadoTrees is Initializable {
(success, ) = address(_tornadoTreesV1).staticcall{ gas: 2500 }(abi.encodeWithSignature(_type, index));
}
function registerDeposit(address _instance, bytes32 _commitment) public onlyTornadoProxy {
uint256 _depositsLength = depositsLength;
deposits[_depositsLength] = keccak256(abi.encode(_instance, _commitment, blockNumber()));
emit DepositData(_instance, _commitment, blockNumber(), _depositsLength);
depositsLength = _depositsLength + 1;
}
function registerWithdrawal(address _instance, bytes32 _nullifierHash) public onlyTornadoProxy {
uint256 _withdrawalsLength = withdrawalsLength;
withdrawals[_withdrawalsLength] = keccak256(abi.encode(_instance, _nullifierHash, blockNumber()));
emit WithdrawalData(_instance, _nullifierHash, blockNumber(), _withdrawalsLength);
withdrawalsLength = _withdrawalsLength + 1;
}
function updateDepositTree(
bytes calldata _proof,
bytes32 _argsHash,
bytes32 _currentRoot,
bytes32 _newRoot,
uint32 _pathIndices,
TreeLeaf[CHUNK_SIZE] calldata _events
) public {
uint256 offset = lastProcessedDepositLeaf;
require(_newRoot != previousDepositRoot, "Outdated deposit root");
require(_currentRoot == depositRoot, "Proposed deposit root is invalid");
require(_pathIndices == offset >> CHUNK_TREE_HEIGHT, "Incorrect deposit insert index");
bytes memory data = new bytes(BYTES_SIZE);
assembly {
mstore(add(data, 0x44), _pathIndices)
mstore(add(data, 0x40), _newRoot)
mstore(add(data, 0x20), _currentRoot)
}
for (uint256 i = 0; i < CHUNK_SIZE; i++) {
(bytes32 hash, address instance, uint32 blockNumber) = (_events[i].hash, _events[i].instance, _events[i].block);
bytes32 leafHash = keccak256(abi.encode(instance, hash, blockNumber));
bytes32 deposit = offset + i >= depositsV1Length ? deposits[offset + i] : tornadoTreesV1.deposits(offset + i);
require(leafHash == deposit, "Incorrect deposit");
assembly {
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x7c), blockNumber)
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x78), instance)
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x64), hash)
}
if (offset + i >= depositsV1Length) {
delete deposits[offset + i];
} else {
emit DepositData(instance, hash, blockNumber, offset + i);
}
}
uint256 argsHash = uint256(sha256(data)) % SNARK_FIELD;
require(argsHash == uint256(_argsHash), "Invalid args hash");
require(treeUpdateVerifier.verifyProof(_proof, [argsHash]), "Invalid deposit tree update proof");
previousDepositRoot = _currentRoot;
depositRoot = _newRoot;
lastProcessedDepositLeaf = offset + CHUNK_SIZE;
}
function updateWithdrawalTree(
bytes calldata _proof,
bytes32 _argsHash,
bytes32 _currentRoot,
bytes32 _newRoot,
uint32 _pathIndices,
TreeLeaf[CHUNK_SIZE] calldata _events
) public {
uint256 offset = lastProcessedWithdrawalLeaf;
require(_newRoot != previousWithdrawalRoot, "Outdated withdrawal root");
require(_currentRoot == withdrawalRoot, "Proposed withdrawal root is invalid");
require(_pathIndices == offset >> CHUNK_TREE_HEIGHT, "Incorrect withdrawal insert index");
bytes memory data = new bytes(BYTES_SIZE);
assembly {
mstore(add(data, 0x44), _pathIndices)
mstore(add(data, 0x40), _newRoot)
mstore(add(data, 0x20), _currentRoot)
}
for (uint256 i = 0; i < CHUNK_SIZE; i++) {
(bytes32 hash, address instance, uint32 blockNumber) = (_events[i].hash, _events[i].instance, _events[i].block);
bytes32 leafHash = keccak256(abi.encode(instance, hash, blockNumber));
bytes32 withdrawal = offset + i >= withdrawalsV1Length ? withdrawals[offset + i] : tornadoTreesV1.withdrawals(offset + i);
require(leafHash == withdrawal, "Incorrect withdrawal");
assembly {
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x7c), blockNumber)
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x78), instance)
mstore(add(add(data, mul(ITEM_SIZE, i)), 0x64), hash)
}
if (offset + i >= withdrawalsV1Length) {
delete withdrawals[offset + i];
} else {
emit WithdrawalData(instance, hash, blockNumber, offset + i);
}
}
uint256 argsHash = uint256(sha256(data)) % SNARK_FIELD;
require(argsHash == uint256(_argsHash), "Invalid args hash");
require(treeUpdateVerifier.verifyProof(_proof, [argsHash]), "Invalid withdrawal tree update proof");
previousWithdrawalRoot = _currentRoot;
withdrawalRoot = _newRoot;
lastProcessedWithdrawalLeaf = offset + CHUNK_SIZE;
}
function validateRoots(bytes32 _depositRoot, bytes32 _withdrawalRoot) public view {
require(_depositRoot == depositRoot || _depositRoot == previousDepositRoot, "Incorrect deposit tree root");
require(_withdrawalRoot == withdrawalRoot || _withdrawalRoot == previousWithdrawalRoot, "Incorrect withdrawal tree root");
}
function getRegisteredDeposits() external view returns (bytes32[] memory _deposits) {
uint256 count = depositsLength - lastProcessedDepositLeaf;
_deposits = new bytes32[](count);
for (uint256 i = 0; i < count; i++) {
_deposits[i] = deposits[lastProcessedDepositLeaf + i];
}
}
function getRegisteredWithdrawals() external view returns (bytes32[] memory _withdrawals) {
uint256 count = withdrawalsLength - lastProcessedWithdrawalLeaf;
_withdrawals = new bytes32[](count);
for (uint256 i = 0; i < count; i++) {
_withdrawals[i] = withdrawals[lastProcessedWithdrawalLeaf + i];
}
}
function setTornadoProxyContract(address _tornadoProxy) external onlyGovernance {
tornadoProxy = _tornadoProxy;
emit ProxyUpdated(_tornadoProxy);
}
function setVerifierContract(IBatchTreeUpdateVerifier _treeUpdateVerifier) external onlyGovernance {
treeUpdateVerifier = _treeUpdateVerifier;
emit VerifierUpdated(address(_treeUpdateVerifier));
}
function blockNumber() public view virtual returns (uint256) {
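
`findArrayLength` is now internal (the mock below re-exposes it for tests). The search it performs — probe `_from`, walk in `_step` increments until `elementExists` flips, then binary-search the boundary — can be sketched off-chain like this (a hedged reconstruction of the idea, not the contract code):

```js
// elementExists(i) stands in for the 2500-gas staticcall probe:
// true iff index i of the V1 array is readable.
function findArrayLength(elementExists, from, step) {
  if (from === 0 && step === 0) return 0 // same escape hatch the contract uses for tests
  // Phase 1: step outward until the probe result flips, bracketing the boundary.
  // Like the contract, this never terminates on an empty array with non-zero params.
  const direction = elementExists(from)
  let high = from
  let low
  do {
    low = high
    high = direction ? high + step : Math.max(0, high - step)
  } while (direction === elementExists(high))
  if (!direction) {
    const tmp = low
    low = high
    high = tmp
  }
  // Phase 2: binary search. Invariant: `low` exists, `high` does not.
  while (high - low > 1) {
    const mid = (low + high) >> 1
    if (elementExists(mid)) low = mid
    else high = mid
  }
  return low + 1 // length == last existing index + 1
}
```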

View File

@@ -24,6 +24,15 @@ contract TornadoTreesMock is TornadoTrees {
return currentBlock == 0 ? block.number : currentBlock;
}
function findArrayLengthMock(
ITornadoTreesV1 _tornadoTreesV1,
string memory _type,
uint256 _from,
uint256 _step
) public view returns (uint256) {
return findArrayLength(_tornadoTreesV1, _type, _from, _step);
}
function register(
address _instance,
bytes32 _commitment,
@@ -88,4 +97,32 @@
}
return data;
}
function getRegisteredDeposits() external view returns (bytes32[] memory _deposits) {
uint256 count = depositsLength - lastProcessedDepositLeaf;
_deposits = new bytes32[](count);
for (uint256 i = 0; i < count; i++) {
_deposits[i] = deposits[lastProcessedDepositLeaf + i];
}
}
function getRegisteredWithdrawals() external view returns (bytes32[] memory _withdrawals) {
uint256 count = withdrawalsLength - lastProcessedWithdrawalLeaf;
_withdrawals = new bytes32[](count);
for (uint256 i = 0; i < count; i++) {
_withdrawals[i] = withdrawals[lastProcessedWithdrawalLeaf + i];
}
}
function findArrayLength(
ITornadoTreesV1 _tornadoTreesV1,
string memory _type,
uint256 _from, // most likely array length after the proposal has passed
uint256 _step // optimal step size to find first match, approximately equals dispersion
) internal view override returns (uint256) {
if (_from == 0 && _step == 0) {
return 0;
}
return super.findArrayLength(_tornadoTreesV1, _type, _from, _step);
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "tornado-trees",
"version": "0.0.9",
"version": "0.0.11",
"main": "src/index.js",
"repository": "https://github.com/tornadocash/tornado-trees.git",
"author": "Tornadocash team <hello@tornado.cash>",
@@ -42,7 +42,7 @@
"dependencies": {
"@openzeppelin/contracts": "^3.4.0",
"@openzeppelin/upgrades-core": "^1.5.1",
"circom": "^0.5.38",
"circom": "0.5.42",
"circom_runtime": "^0.1.12",
"circomlib": "git+https://github.com/tornadocash/circomlib.git#d20d53411d1bef61f38c99a8b36d5d0cc4836aa1",
"dotenv": "^8.2.0",

View File

@@ -42,7 +42,14 @@ function prove(input, keyBasePath) {
fs.writeFileSync(`${dir}/input.json`, JSON.stringify(input, null, 2))
out = await exec(`${keyBasePath} ${dir}/input.json ${dir}/witness.json`)
} else {
await wtns.calculate(utils.unstringifyBigInts(input), `${keyBasePath}.wasm`, `${dir}/witness.wtns`)
await wtns.debug(
utils.unstringifyBigInts(input),
`${keyBasePath}.wasm`,
`${dir}/witness.wtns`,
`${keyBasePath}.sym`,
{},
console,
)
const witness = utils.stringifyBigInts(await wtns.exportJson(`${dir}/witness.wtns`))
fs.writeFileSync(`${dir}/witness.json`, JSON.stringify(witness, null, 2))
}
@@ -57,6 +64,14 @@
})
}
/**
* Generates inputs for a snark and tornado trees smart contract.
* This function mutates the provided MerkleTree argument
*
* @param tree Merkle tree with current smart contract state. This object is mutated during function execution.
* @param events New batch of events to insert.
* @returns {{args: [string, string, string, string, *], input: {pathElements: *, instances: *, blocks: *, newRoot: *, hashes: *, oldRoot: *, pathIndices: string}}}
*/
function batchTreeUpdate(tree, events) {
const batchHeight = Math.log2(events.length)
if (!Number.isInteger(batchHeight)) {
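
Per the JSDoc above, a typical call site pairs `batchTreeUpdate` with `prove` and the contract update. A hedged sketch (the key path and the `tornadoTrees`/`tree`/`events` variables are assumptions):

```js
// Hypothetical usage: `tree` is a MerkleTree holding the current contract state,
// `events` is a queued batch, and proving keys live under ./artifacts/circuits.
const { input, args } = batchTreeUpdate(tree, events) // mutates `tree` by inserting the batch
const proof = await prove(input, './artifacts/circuits/BatchTreeUpdate')
await tornadoTrees.updateDepositTree(proof, ...args)
```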

View File

@@ -28,40 +28,70 @@ describe('findArrayLength', () => {
})
it('should work for even array', async () => {
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 4, 2)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
4,
2,
)
expect(depositsLength).to.be.equal(depositsEven.length)
})
it('should work for empty array', async () => {
publicArray = await PublicArray.deploy()
// will throw out of gas if you pass non-zero params
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 0, 0)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
0,
0,
)
expect(depositsLength).to.be.equal(0)
})
it('should work for odd array', async () => {
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(depositsOdd)
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 4, 2)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
4,
2,
)
expect(depositsLength).to.be.equal(depositsOdd.length)
})
it('should work for even array and odd step', async () => {
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 4, 3)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
4,
3,
)
expect(depositsLength).to.be.equal(depositsEven.length)
})
it('should work for odd array and odd step', async () => {
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(depositsOdd)
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 4, 3)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
4,
3,
)
expect(depositsLength).to.be.equal(depositsOdd.length)
})
it('should work for odd array and step 1', async () => {
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(depositsOdd)
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 4, 1)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
4,
1,
)
expect(depositsLength).to.be.equal(depositsOdd.length)
})
@@ -69,7 +99,7 @@ describe('findArrayLength', () => {
const deposits = Array.from(Array(100).keys())
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(deposits)
const depositsLength = await tornadoTrees.findArrayLength(
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
67,
@@ -82,7 +112,12 @@ describe('findArrayLength', () => {
const deposits = Array.from(Array(30).keys())
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(deposits)
const depositsLength = await tornadoTrees.findArrayLength(publicArray.address, 'deposits(uint256)', 1, 50)
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
1,
50,
)
expect(depositsLength).to.be.equal(deposits.length)
})
@@ -100,7 +135,7 @@ describe('findArrayLength', () => {
const deposits = Array.from(Array(len).keys())
publicArray = await PublicArray.deploy()
await publicArray.setDeposits(deposits)
const depositsLength = await tornadoTrees.findArrayLength(
const depositsLength = await tornadoTrees.findArrayLengthMock(
publicArray.address,
'deposits(uint256)',
days * depositsPerDay,

View File

@@ -2241,10 +2241,10 @@ circom@0.5.33:
tmp-promise "^2.0.2"
wasmbuilder "0.0.10"
circom@^0.5.38:
version "0.5.38"
resolved "https://registry.yarnpkg.com/circom/-/circom-0.5.38.tgz#c099fb196085837575fb266f37b0516b1ec56eb5"
integrity sha512-PFlXto8gDysUlwk6z/GYbn1Mv5BtW9BI4769N9gSP0/7KDNSqLNyVmL4DgMLc67/EpG4qJLGch3SdgzQD+/cfw==
circom@0.5.42:
version "0.5.42"
resolved "https://registry.yarnpkg.com/circom/-/circom-0.5.42.tgz#96a456f9538f4425654df091d15e3158e9da2acc"
integrity sha512-v6+f9g3z2ia17NQvQmyZjvh8cE8O3GtxRE36KfJfx/a+s58Y7aEDWsUG+GFRJhp1ajiQELdj3NehY9vHSf5Rkg==
dependencies:
chai "^4.2.0"
circom_runtime "0.1.12"