ethereum-etl commit:
added tox tests, fixed some incompatibilities with python 3.5
@@ -46,8 +46,8 @@ def read_resource(resource_group, file_name):
     skip_if_slow_tests_disabled((47218, 47219, 2, 'blocks_with_transactions', 'infura')),
 ])
 def test_export_blocks_job(tmpdir, start_block, end_block, batch_size, resource_group, web3_provider_type):
-    blocks_output_file = tmpdir.join('actual_blocks.csv')
-    transactions_output_file = tmpdir.join('actual_transactions.csv')
+    blocks_output_file = str(tmpdir.join('actual_blocks.csv'))
+    transactions_output_file = str(tmpdir.join('actual_transactions.csv'))
 
     job = ExportBlocksJob(
         start_block=start_block, end_block=end_block, batch_size=batch_size,
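Every hunk in this commit applies the same fix: the py.path.local object returned by pytest's tmpdir fixture is wrapped in str() before being handed to code that opens files. Python 3.6 introduced os.PathLike (PEP 519), which lets open() and the os functions accept such path objects, but Python 3.5 requires a plain string. A minimal pytest sketch of the pattern (the test and file names are illustrative, not from this commit):

def test_tmpdir_path_as_str(tmpdir):
    # `tmpdir` is a py.path.local object, not a str.
    path_obj = tmpdir.join('output.csv')

    # On Python 3.5, passing path_obj where a plain string is required
    # raises TypeError, because the os.PathLike protocol (PEP 519) only
    # exists from 3.6 onwards; an explicit str() works on every version.
    output_file = str(path_obj)
    assert isinstance(output_file, str)

    with open(output_file, 'w') as f:
        f.write('ok\n')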
@@ -46,7 +46,7 @@ CONTRACT_ADDRESSES_UNDER_TEST = ['0x06012c8cf97bead5deae237070f9587f8e7a266d']
 ])
 def test_export_contracts_job(tmpdir, batch_size, contract_addresses, output_format, resource_group,
                               web3_provider_type):
-    contracts_output_file = tmpdir.join('actual_contracts.' + output_format)
+    contracts_output_file = str(tmpdir.join('actual_contracts.' + output_format))
 
     job = ExportContractsJob(
         contract_addresses_iterable=contract_addresses,
@@ -47,7 +47,7 @@ def read_resource(resource_group, file_name):
     (1000895, 1000895, 'block_with_error', 'mock'),
 ])
 def test_export_geth_traces_job(tmpdir, start_block, end_block, resource_group, web3_provider_type):
-    traces_output_file = tmpdir.join('actual_geth_traces.json')
+    traces_output_file = str(tmpdir.join('actual_geth_traces.json'))
 
     job = ExportGethTracesJob(
         start_block=start_block, end_block=end_block, batch_size=1,
@@ -51,8 +51,8 @@ DEFAULT_TX_HASHES = ['0x04cbcb236043d8fb7839e07bbc7f5eed692fb2ca55d897f1101eac3e
     skip_if_slow_tests_disabled((2, DEFAULT_TX_HASHES, 'json', 'receipts_with_logs', 'infura'))
 ])
 def test_export_receipts_job(tmpdir, batch_size, transaction_hashes, output_format, resource_group, web3_provider_type):
-    receipts_output_file = tmpdir.join('actual_receipts.' + output_format)
-    logs_output_file = tmpdir.join('actual_logs.' + output_format)
+    receipts_output_file = str(tmpdir.join('actual_receipts.' + output_format))
+    logs_output_file = str(tmpdir.join('actual_logs.' + output_format))
 
     job = ExportReceiptsJob(
         transaction_hashes_iterable=transaction_hashes,
@@ -42,7 +42,7 @@ def read_resource(resource_group, file_name):
     (483920, 483920, 1, 'block_with_transfers', 'mock')
 ])
 def test_export_token_transfers_job(tmpdir, start_block, end_block, batch_size, resource_group, web3_provider_type):
-    output_file = tmpdir.join('token_transfers.csv')
+    output_file = str(tmpdir.join('token_transfers.csv'))
 
     job = ExportTokenTransfersJob(
         start_block=start_block, end_block=end_block, batch_size=batch_size,
@@ -46,7 +46,7 @@ def read_resource(resource_group, file_name):
     )
 ])
 def test_export_tokens_job(tmpdir, token_addresses, resource_group, web3_provider_type):
-    output_file = tmpdir.join('tokens.csv')
+    output_file = str(tmpdir.join('tokens.csv'))
 
     job = ExportTokensJob(
         token_addresses_iterable=token_addresses,
@@ -46,7 +46,7 @@ def read_resource(resource_group, file_name):
     (1000895, 1000895, 'block_with_error', 'mock'),
 ])
 def test_export_traces_job(tmpdir, start_block, end_block, resource_group, web3_provider_type):
-    traces_output_file = tmpdir.join('actual_traces.csv')
+    traces_output_file = str(tmpdir.join('actual_traces.csv'))
 
     job = ExportTracesJob(
         start_block=start_block, end_block=end_block, batch_size=1,
@@ -44,7 +44,7 @@ def read_resource(resource_group, file_name):
     'block_with_error',
 ])
 def test_extract_traces_job(tmpdir, resource_group):
-    output_file = tmpdir.join('actual_traces.csv')
+    output_file = str(tmpdir.join('actual_traces.csv'))
 
     geth_traces_content = read_resource(resource_group, 'geth_traces.json')
     traces_iterable = (json.loads(line) for line in geth_traces_content.splitlines())
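The traces_iterable above parses newline-delimited JSON lazily: the generator expression decodes one line at a time as the job consumes it, so a large geth_traces.json fixture is never fully materialized as objects. A self-contained sketch of the same pattern (the field names are illustrative):

import json

geth_traces_content = '\n'.join([
    '{"block_number": 1000690, "transaction_index": 0}',
    '{"block_number": 1000690, "transaction_index": 1}',
])

# Parentheses make this a generator: each json.loads() call happens
# only when the consumer advances the iterator.
traces_iterable = (json.loads(line) for line in geth_traces_content.splitlines())

for trace in traces_iterable:
    print(trace['block_number'], trace['transaction_index'])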
@@ -41,7 +41,7 @@ def read_resource(resource_group, file_name):
     'logs'
 ])
 def test_export_token_transfers_job(tmpdir, resource_group):
-    output_file = tmpdir.join('token_transfers.csv')
+    output_file = str(tmpdir.join('token_transfers.csv'))
 
     logs_content = read_resource(resource_group, 'logs.csv')
     logs_csv_reader = csv.DictReader(io.StringIO(logs_content))
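Here csv.DictReader over io.StringIO turns the logs.csv fixture, already read into a string, into an iterator of dicts keyed by the header row, with no temporary file needed. A self-contained sketch (the column names are illustrative):

import csv
import io

logs_content = 'log_index,address\n0,0x06012c8cf97bead5deae237070f9587f8e7a266d\n'

# io.StringIO makes the in-memory string behave like an open text file;
# DictReader takes its field names from the first row.
logs_csv_reader = csv.DictReader(io.StringIO(logs_content))

for row in logs_csv_reader:
    print(row['log_index'], row['address'])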
@@ -22,15 +22,26 @@
 
 import os
+import json
 
 import pytest
 
 
+def sort_json(json_string):
+    return json.dumps(json.loads(json_string), sort_keys=True)
+
+
 def compare_lines_ignore_order(expected, actual):
     expected_lines = expected.splitlines()
     actual_lines = actual.splitlines()
     assert len(expected_lines) == len(actual_lines)
 
+    try:
+        expected_lines = [sort_json(line) for line in expected_lines]
+        actual_lines = [sort_json(line) for line in actual_lines]
+    except json.decoder.JSONDecodeError:
+        pass
+
     for expected_line, actual_line in zip(sorted(expected_lines), sorted(actual_lines)):
         assert expected_line == actual_line
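The new sort_json helper canonicalizes each JSON line with sort_keys=True before compare_lines_ignore_order sorts and compares the lines, so the fixture comparisons no longer depend on key order or line order; the JSONDecodeError fallback leaves non-JSON output (such as the CSV fixtures) to be compared as raw lines. A self-contained usage sketch of the helpers added above:

import json


def sort_json(json_string):
    return json.dumps(json.loads(json_string), sort_keys=True)


def compare_lines_ignore_order(expected, actual):
    expected_lines = expected.splitlines()
    actual_lines = actual.splitlines()
    assert len(expected_lines) == len(actual_lines)

    try:
        expected_lines = [sort_json(line) for line in expected_lines]
        actual_lines = [sort_json(line) for line in actual_lines]
    except json.decoder.JSONDecodeError:
        pass

    for expected_line, actual_line in zip(sorted(expected_lines), sorted(actual_lines)):
        assert expected_line == actual_line


# Same JSON lines in a different line order and key order: still passes.
compare_lines_ignore_order(
    '{"b": 2, "a": 1}\n{"c": 3}',
    '{"c": 3}\n{"a": 1, "b": 2}',
)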