// Copyright (c) 2014-2015, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once

#include <boost/serialization/serialization.hpp>
#include <boost/serialization/version.hpp>
#include <boost/serialization/list.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/global_fun.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/foreach.hpp>
#include <atomic>

#include "syncobj.h"
#include "string_tools.h"

#include "tx_pool.h"
#include "cryptonote_basic.h"
#include "common/util.h"
#include "cryptonote_protocol/cryptonote_protocol_defs.h"
#include "rpc/core_rpc_server_commands_defs.h"
#include "difficulty.h"
#include "cryptonote_core/cryptonote_format_utils.h"
#include "verification_context.h"
#include "crypto/hash.h"
#include "checkpoints.h"

namespace cryptonote
{
  /************************************************************************/
  /*                                                                      */
  /************************************************************************/
  class blockchain_storage
  {
  public:
    struct transaction_chain_entry
    {
      transaction tx;
      uint64_t m_keeper_block_height;
      size_t m_blob_size;
      std::vector<uint64_t> m_global_output_indexes;
    };

    struct block_extended_info
    {
      block bl;
      uint64_t height;
      size_t block_cumulative_size;
      difficulty_type cumulative_difficulty;
      uint64_t already_generated_coins;
    };

    blockchain_storage(tx_memory_pool* tx_pool) : m_tx_pool(tx_pool), m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false), m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false)
    {};

    bool init() { return init(tools::get_default_data_dir(), true); }
    bool init(const std::string& config_folder, bool testnet = false);
    bool deinit();

    void set_checkpoints(checkpoints&& chk_pts) { m_checkpoints = chk_pts; }
    //bool push_new_block();
    bool get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks, std::list<transaction>& txs) const;
    bool get_blocks(uint64_t start_offset, size_t count, std::list<block>& blocks) const;
    bool get_alternative_blocks(std::list<block>& blocks) const;
    size_t get_alternative_blocks_count() const;
    crypto::hash get_block_id_by_height(uint64_t height) const;
    bool get_block_by_hash(const crypto::hash& h, block& blk) const;
    void get_all_known_block_ids(std::list<crypto::hash>& main, std::list<crypto::hash>& alt, std::list<crypto::hash>& invalid) const;

    template<class archive_t>
    void serialize(archive_t& ar, const unsigned int version);

    bool have_tx(const crypto::hash& id) const;
    bool have_tx_keyimges_as_spent(const transaction& tx) const;
    bool have_tx_keyimg_as_spent(const crypto::key_image& key_im) const;
    const transaction* get_tx(const crypto::hash& id) const;

    uint64_t get_current_blockchain_height() const;
    crypto::hash get_tail_id() const;
    crypto::hash get_tail_id(uint64_t& height) const;
    difficulty_type get_difficulty_for_next_block() const;

    bool add_new_block(const block& bl_, block_verification_context& bvc);
    bool reset_and_set_genesis_block(const block& b);
    bool create_block_template(block& b, const account_public_address& miner_address, difficulty_type& di, uint64_t& height, const blobdata& ex_nonce) const;
    bool have_block(const crypto::hash& id) const;
    size_t get_total_transactions() const;
    bool get_outs(uint64_t amount, std::list<crypto::public_key>& pkeys) const;
    bool get_short_chain_history(std::list<crypto::hash>& ids) const;
    bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const;
    bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, uint64_t& starter_offset) const;
    bool find_blockchain_supplement(const uint64_t req_start_block, const std::list<crypto::hash>& qblock_ids, std::list<std::pair<block, std::list<transaction> > >& blocks, uint64_t& total_height, uint64_t& start_height, size_t max_count) const;

    bool handle_get_objects(NOTIFY_REQUEST_GET_OBJECTS::request& arg, NOTIFY_RESPONSE_GET_OBJECTS::request& rsp);
    bool handle_get_objects(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res);
    bool get_random_outs_for_amounts(const COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::request& req, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::response& res) const;
    bool get_backward_blocks_sizes(size_t from_height, std::vector<size_t>& sz, size_t count) const;
    bool get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<uint64_t>& indexs) const;

    bool store_blockchain();

    /* ** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY) **

       Blockchain:
       1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
       2. Optim: Cache verified txs and return the result from the cache instead of re-checking whenever possible.
       3. Optim: Preload output keys when encountering groups of blocks. Sort by amount and global index before bulk-querying the database, and multi-thread when possible.
       4. Optim: Disable the double-spend check on block verification; double spends are already detected when trying to add blocks.
       5. Optim: Multi-thread signature computation whenever possible.
       6. Patch: Disable locking (recursive mutex) in functions called from check_tx_inputs, which causes slowdowns (only seems to happen on Ubuntu/VMs??? reason: TBD).
       7. Optim: Removed looped full-tx hash computation when retrieving transactions from the pool (???).
       8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads are needed when a new block arrives,
          instead of 1470 reads (one timestamp and one cumulative difficulty for each of the 735 blocks).

       Berkeley DB:
       1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc.).
       2. Fix: Unable to pop blocks on reorganize due to transaction errors.
       3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
       4. Patch: "Insufficient locks" error when running a full sync.
       5. Patch: Incorrect db stats when returning from an immediate exit from the "pop block" operation.
       6. Optim: Added bulk queries to get output global indices.
       7. Optim: Modified the output_keys table to store public_key+unlock_time+height, so a single lookup is needed per output (instead of 3).
       8. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
       9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
       10. Optim: Added support for nosync/write_nosync options for improved performance (see the --db-sync-mode option for details).
       11. Mod: Added a checkpoint thread and an auto-remove-logs option.
       12. Now usable on 32-bit systems such as the RPI2.

       LMDB:
       1. Optim: Added custom comparison for 256-bit key tables (minor speed-up; actual effect TBD).
       2. Optim: Modified the output_keys table to store public_key+unlock_time+height, so a single lookup is needed per output (instead of 3).
       3. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
       4. Optim: Added support for sync/writemap options for improved performance (see the --db-sync-mode option for details).
       5. Mod: Auto-resize by +1GB instead of by a x1.5 multiplier.

       ETC:
       1. Minor optimizations of slow-hash for ARM (RPI2). Incomplete.
       2. Fix: 32-bit saturation bug when computing the next difficulty on large blocks.

       [PENDING ISSUES]
       1. Berkeley DB has a very slow "pop block" operation. This is very noticeable on the RPI2, where it sometimes takes more than 10 minutes to pop a block during reorganization.
          This does not happen very often, however; most reorgs seem to take a few seconds, but it possibly depends on the number of outputs present. TBD.
       2. Berkeley DB: possible "unable to allocate memory" bug. TBD.

       [NEW OPTIONS] (currently all enabled for testing purposes)
       1. --fast-block-sync arg=[0:1] (default: 1)
          a. 0 = Compute the long hash per block (may take a while depending on the CPU).
          b. 1 = Skip the long hash and verify blocks based on embedded known-good block hashes (faster, minimal CPU dependence).
       2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
          a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but the safest option to protect against power-loss/crash conditions.
          b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery-operated devices or stable systems with a UPS,
             and/or systems with a battery-backed write cache/solid-state cache.
             Fast    - Write meta-data but defer the data flush.
             Fastest - Defer both the meta-data and data flush.
             Sync    - Flush data after nblocks_per_sync and wait.
             Async   - Flush data after nblocks_per_sync but do not wait for the operation to finish.
       3. --prep-blocks-threads arg=[n] (default: 4 or the system's max threads, whichever is lower)
          Max number of threads to use when computing long hashes in groups.
       4. --show-time-stats arg=[0:1] (default: 1)
          Show benchmark-related time stats.
       5. --db-auto-remove-logs arg=[0:1] (default: 1)
          For Berkeley DB only. Auto-remove logs if enabled.

       Note: LMDB and Berkeley DB have changes to the tables and are not compatible with the official git head version;
       at the moment, you need a full resync to use this optimized version.

       [PERFORMANCE COMPARISON]
       (Some figures are approximations only.)
       Using a baseline machine of an i7-2600K + SSD (with full PoW computation):
       1. The optimized lmdb/blockchain core can process blocks up to 585K in ~1.25 hours plus download time, so it usually takes about 2.5 hours to sync the full chain.
       2. The current head with the in-memory blockchain can process blocks up to 585K in ~4.2 hours plus download time, so it usually takes about 5.5 hours to sync the full chain.
       3. The current head with lmdb can process blocks up to 585K in ~32 hours plus download time, and usually takes about 36 hours to sync the full chain.

       Average processing times (with full PoW computation):
       lmdb-optimized:
       1. tx_ave    = 2.5 ms / tx
       2. block_ave = 5.87 ms / block
       memory-official-repo:
       1. tx_ave    = 8.85 ms / tx
       2. block_ave = 19.68 ms / block
       lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e):
       1. tx_ave    = 47.8 ms / tx
       2. block_ave = 64.2 ms / block

       Note: the figures below denote processing time only (they do not include p2p download time).
       lmdb-optimized processing times (with full PoW computation):
       1. Desktop,  quad-core / 8-thread 2600K (8MB)    - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
       2. Laptop,   dual-core / 4-thread U4200 (3MB)    - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
       3. Embedded, quad-core / 4-thread Z3735F (2x1MB) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
       lmdb-optimized processing times (with per-block checkpoint):
       1. Desktop, quad-core / 8-thread 2600K (8MB) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
       berkeley-db optimized processing times (with full PoW computation):
       1. Desktop, quad-core / 8-thread 2600K (8MB) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
       2. RPI2 - improved from an estimated 3 months (???) to 2.5 days (needs a 2A supply, a 1GHz clock and [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
       berkeley-db optimized processing times (with per-block checkpoint):
       1. RPI2 - 12-15 hours (needs a 2A supply, a 1GHz clock and [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).

       Illustrative sketches of the --db-sync-mode value format and of the batched long-hash threading pattern appear at the end of this file. */
    bool check_tx_inputs(const transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id) const;
    uint64_t get_current_cumulative_blocksize_limit() const;
    bool is_storing_blockchain() const { return m_is_blockchain_storing; }
    uint64_t block_difficulty(size_t i) const;
    double get_avg_block_size(size_t count) const;
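
    // Copies each requested block (looked up by hash in the main-chain index)
    // into `blocks`; hashes that are not found are reported in `missed_bs`.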
    template<class t_ids_container, class t_blocks_container, class t_missed_container>
    bool get_blocks(const t_ids_container& block_ids, t_blocks_container& blocks, t_missed_container& missed_bs) const
    {
      CRITICAL_REGION_LOCAL(m_blockchain_lock);

      BOOST_FOREACH(const auto& bl_id, block_ids)
      {
        auto it = m_blocks_index.find(bl_id);
        if(it == m_blocks_index.end())
          missed_bs.push_back(bl_id);
        else
        {
          CHECK_AND_ASSERT_MES(it->second < m_blocks.size(), false, "Internal error: bl_id=" << epee::string_tools::pod_to_hex(bl_id)
            << " has an index record with offset=" << it->second << ", bigger than m_blocks.size()=" << m_blocks.size());
          blocks.push_back(m_blocks[it->second].bl);
        }
      }
      return true;
    }
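
    // Looks up each requested transaction by id in the main chain, falling back
    // to the tx memory pool; ids found in neither are reported in `missed_txs`.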
    template<class t_ids_container, class t_tx_container, class t_missed_container>
    bool get_transactions(const t_ids_container& txs_ids, t_tx_container& txs, t_missed_container& missed_txs) const
    {
      CRITICAL_REGION_LOCAL(m_blockchain_lock);
      BOOST_FOREACH(const auto& tx_id, txs_ids)
      {
        auto it = m_transactions.find(tx_id);
        if(it == m_transactions.end())
        {
          transaction tx;
          if(!m_tx_pool->get_transaction(tx_id, tx))
            missed_txs.push_back(tx_id);
          else
            txs.push_back(tx);
        }
        else
          txs.push_back(it->second.tx);
      }
      return true;
    }

    // debug functions
    void print_blockchain(uint64_t start_index, uint64_t end_index) const;
    void print_blockchain_index() const;
    void print_blockchain_outs(const std::string& file) const;

    void check_against_checkpoints(const checkpoints& points, bool enforce);
    bool update_checkpoints(const std::string& file_path, bool check_dns);
    void set_enforce_dns_checkpoints(bool enforce_checkpoints);

    block get_block(uint64_t height) const { return m_blocks[height].bl; }
    size_t get_block_size(uint64_t height) const { return m_blocks[height].block_cumulative_size; }
    difficulty_type get_block_cumulative_difficulty(uint64_t height) const { return m_blocks[height].cumulative_difficulty; }
    uint64_t get_block_coins_generated(uint64_t height) const { return m_blocks[height].already_generated_coins; }

    bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const;
    bool for_all_blocks(std::function<bool(uint64_t height, const block&)>) const;
    bool for_all_transactions(std::function<bool(const transaction&)>) const;
    bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash& tx_hash, size_t tx_idx)>) const;

    // use for testing only
    bool debug_pop_block_from_blockchain() { return pop_block_from_blockchain(); }

  private:
    typedef std::unordered_map<crypto::hash, size_t> blocks_by_id_index;
    typedef std::unordered_map<crypto::hash, transaction_chain_entry> transactions_container;
    typedef std::unordered_set<crypto::key_image> key_images_container;
    typedef std::vector<block_extended_info> blocks_container;
    typedef std::unordered_map<crypto::hash, block_extended_info> blocks_ext_by_hash;
    typedef std::unordered_map<crypto::hash, block> blocks_by_hash;
    typedef std::map<uint64_t, std::vector<std::pair<crypto::hash, size_t> > > outputs_container; //crypto::hash - tx hash, size_t - index of out in transaction

    tx_memory_pool* m_tx_pool;
    mutable epee::critical_section m_blockchain_lock; // TODO: add here reader/writer lock

    // main chain
    blocks_container m_blocks;               // height -> block_extended_info
    blocks_by_id_index m_blocks_index;       // crypto::hash -> height
    transactions_container m_transactions;
    key_images_container m_spent_keys;
    size_t m_current_block_cumul_sz_limit;

    // all alternative chains
    blocks_ext_by_hash m_alternative_chains; // crypto::hash -> block_extended_info

    // some invalid blocks
    blocks_ext_by_hash m_invalid_blocks;     // crypto::hash -> block_extended_info

    outputs_container m_outputs;

    std::string m_config_folder;
    checkpoints m_checkpoints;
    std::atomic<bool> m_is_in_checkpoint_zone;
    std::atomic<bool> m_is_blockchain_storing;

    bool m_enforce_dns_checkpoints;
    bool m_testnet;

    // made private for consistency with blockchain.h
    template<class visitor_t>
    bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height = NULL) const;
    bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, const std::vector<crypto::signature>& sig, uint64_t* pmax_related_block_height = NULL) const;
    bool check_tx_inputs(const transaction& tx, const crypto::hash& tx_prefix_hash, uint64_t* pmax_used_block_height = NULL) const;
    bool check_tx_inputs(const transaction& tx, uint64_t* pmax_used_block_height = NULL) const;

    bool switch_to_alternative_blockchain(std::list<blocks_ext_by_hash::iterator>& alt_chain, bool discard_disconnected_chain);
    bool pop_block_from_blockchain();
    bool purge_block_data_from_blockchain(const block& b, size_t processed_tx_count);
    bool purge_transaction_from_blockchain(const crypto::hash& tx_id);
    bool purge_transaction_keyimages_from_blockchain(const transaction& tx, bool strict_check);

    bool handle_block_to_main_chain(const block& bl, block_verification_context& bvc);
    bool handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc);
    bool handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc);

    difficulty_type get_next_difficulty_for_alternative_chain(const std::list<blocks_ext_by_hash::iterator>& alt_chain, block_extended_info& bei) const;
    bool prevalidate_miner_transaction(const block& b, uint64_t height) const;
    bool validate_miner_transaction(const block& b, size_t cumulative_block_size, uint64_t fee, uint64_t& base_reward, uint64_t already_generated_coins) const;
    bool validate_transaction(const block& b, uint64_t height, const transaction& tx) const;
    bool rollback_blockchain_switching(std::list<block>& original_chain, size_t rollback_height);
    bool add_transaction_from_block(const transaction& tx, const crypto::hash& tx_id, const crypto::hash& bl_id, uint64_t bl_height, size_t blob_size);
    bool push_transaction_to_global_outs_index(const transaction& tx, const crypto::hash& tx_id, std::vector<uint64_t>& global_indexes);
    bool pop_transaction_from_global_index(const transaction& tx, const crypto::hash& tx_id);
    bool get_last_n_blocks_sizes(std::vector<size_t>& sz, size_t count) const;
    bool add_out_to_get_random_outs(const std::vector<std::pair<crypto::hash, size_t> >& amount_outs, COMMAND_RPC_GET_RANDOM_OUTPUTS_FOR_AMOUNTS::outs_for_amount& result_outs, uint64_t amount, size_t i) const;
    bool is_tx_spendtime_unlocked(uint64_t unlock_time) const;
    bool add_block_as_invalid(const block& bl, const crypto::hash& h);
    bool add_block_as_invalid(const block_extended_info& bei, const crypto::hash& h);
    size_t find_end_of_allowed_index(const std::vector<std::pair<crypto::hash, size_t> >& amount_outs) const;
    bool check_block_timestamp_main(const block& b) const;
    bool check_block_timestamp(std::vector<uint64_t> timestamps, const block& b) const;
    uint64_t get_adjusted_time() const;
    bool complete_timestamps_vector(uint64_t start_height, std::vector<uint64_t>& timestamps) const;
    bool update_next_comulative_size_limit();
    bool store_genesis_block(bool testnet, bool check_added = false);
  };

  /************************************************************************/
  /*                                                                      */
  /************************************************************************/

  #define CURRENT_BLOCKCHAIN_STORAGE_ARCHIVE_VER 12

  template<class archive_t>
  void blockchain_storage::serialize(archive_t& ar, const unsigned int version)
  {
    if(version < 11)
      return;
    CRITICAL_REGION_LOCAL(m_blockchain_lock);
    ar & m_blocks;
    ar & m_blocks_index;
    ar & m_transactions;
    ar & m_spent_keys;
    ar & m_alternative_chains;
    ar & m_outputs;
    ar & m_invalid_blocks;
    ar & m_current_block_cumul_sz_limit;
    /*serialization bug workaround*/
    if(version > 11)
    {
      // Consistency check: the sum of all container sizes plus the current size
      // limit is stored alongside the data and verified on load, so a truncated
      // or corrupted archive is detected.
      uint64_t total_check_count = m_blocks.size() + m_blocks_index.size() + m_transactions.size() + m_spent_keys.size() + m_alternative_chains.size() + m_outputs.size() + m_invalid_blocks.size() + m_current_block_cumul_sz_limit;
      if(archive_t::is_saving::value)
      {
        ar & total_check_count;
      }
      else
      {
        uint64_t total_check_count_loaded = 0;
        ar & total_check_count_loaded;
        if(total_check_count != total_check_count_loaded)
        {
          LOG_ERROR("Blockchain storage data corruption detected. total_count loaded from file = " << total_check_count_loaded << ", expected = " << total_check_count);
          LOG_PRINT_L0("Blockchain storage:" << ENDL <<
            "m_blocks: " << m_blocks.size() << ENDL <<
            "m_blocks_index: " << m_blocks_index.size() << ENDL <<
            "m_transactions: " << m_transactions.size() << ENDL <<
            "m_spent_keys: " << m_spent_keys.size() << ENDL <<
            "m_alternative_chains: " << m_alternative_chains.size() << ENDL <<
            "m_outputs: " << m_outputs.size() << ENDL <<
            "m_invalid_blocks: " << m_invalid_blocks.size() << ENDL <<
            "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit);
          throw std::runtime_error("Blockchain data corruption");
        }
      }
    }

    LOG_PRINT_L2("Blockchain storage:" << ENDL <<
      "m_blocks: " << m_blocks.size() << ENDL <<
      "m_blocks_index: " << m_blocks_index.size() << ENDL <<
      "m_transactions: " << m_transactions.size() << ENDL <<
      "m_spent_keys: " << m_spent_keys.size() << ENDL <<
      "m_alternative_chains: " << m_alternative_chains.size() << ENDL <<
      "m_outputs: " << m_outputs.size() << ENDL <<
      "m_invalid_blocks: " << m_invalid_blocks.size() << ENDL <<
      "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit);
  }
  //------------------------------------------------------------------
  template<class visitor_t>
  bool blockchain_storage::scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t& vis, uint64_t* pmax_related_block_height) const
  {
    CRITICAL_REGION_LOCAL(m_blockchain_lock);
    auto it = m_outputs.find(tx_in_to_key.amount);
    if(it == m_outputs.end() || !tx_in_to_key.key_offsets.size())
      return false;

    // key_offsets are stored relative to one another; convert them to absolute
    // indexes into this amount's output vector (e.g. {5, 2, 7} -> {5, 7, 14}).
    std::vector<uint64_t> absolute_offsets = relative_output_offsets_to_absolute(tx_in_to_key.key_offsets);

    const std::vector<std::pair<crypto::hash, size_t> >& amount_outs_vec = it->second;
    size_t count = 0;
    BOOST_FOREACH(uint64_t i, absolute_offsets)
    {
      if(i >= amount_outs_vec.size())
      {
        LOG_PRINT_L0("Wrong index in transaction inputs: " << i << ", expected maximum " << amount_outs_vec.size() - 1);
        return false;
      }
      transactions_container::const_iterator tx_it = m_transactions.find(amount_outs_vec[i].first);
      CHECK_AND_ASSERT_MES(tx_it != m_transactions.end(), false, "Wrong transaction id in output indexes: " << epee::string_tools::pod_to_hex(amount_outs_vec[i].first));
      CHECK_AND_ASSERT_MES(amount_outs_vec[i].second < tx_it->second.tx.vout.size(), false,
        "Wrong index in transaction outputs: " << amount_outs_vec[i].second << ", expected less than " << tx_it->second.tx.vout.size());
      if(!vis.handle_output(tx_it->second.tx, tx_it->second.tx.vout[amount_outs_vec[i].second]))
      {
        LOG_PRINT_L0("Failed to handle_output for output no = " << count << ", with absolute offset " << i);
        return false;
      }
      if(count++ == absolute_offsets.size() - 1 && pmax_related_block_height)
      {
        if(*pmax_related_block_height < tx_it->second.m_keeper_block_height)
          *pmax_related_block_height = tx_it->second.m_keeper_block_height;
      }
    }
    return true;
  }
}
BOOST_CLASS_VERSION(cryptonote::blockchain_storage, CURRENT_BLOCKCHAIN_STORAGE_ARCHIVE_VER)
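
// ---------------------------------------------------------------------------
// The two sketches below are illustrative only and are not part of the original
// interface; every name they introduce (blockchain_storage_examples,
// example_db_sync_options, example_parse_db_sync_mode,
// example_hash_blocks_in_parallel) is hypothetical. Under those assumptions,
// they show (a) one way a "--db-sync-mode" value of the form
// [safe|fast|fastest]:[sync|async]:[nblocks_per_sync] described in the notes
// above could be split into its fields, and (b) the general shape of computing
// long hashes for a group of blocks on several threads.
#include <cstdint>
#include <cstdlib>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

namespace blockchain_storage_examples
{
  struct example_db_sync_options
  {
    std::string durability;    // "safe", "fast" or "fastest"
    bool async;                // true for "async", false for "sync"
    uint64_t blocks_per_sync;  // flush every N blocks
  };

  // Splits e.g. "fastest:async:1000" into its three fields; returns false on
  // any malformed input.
  inline bool example_parse_db_sync_mode(const std::string& arg, example_db_sync_options& out)
  {
    std::istringstream ss(arg);
    std::string durability, sync_kind, nblocks;
    if(!std::getline(ss, durability, ':') || !std::getline(ss, sync_kind, ':') || !std::getline(ss, nblocks, ':'))
      return false;
    if(durability != "safe" && durability != "fast" && durability != "fastest")
      return false;
    if(sync_kind != "sync" && sync_kind != "async")
      return false;
    out.durability = durability;
    out.async = (sync_kind == "async");
    out.blocks_per_sync = std::strtoull(nblocks.c_str(), NULL, 10);
    return out.blocks_per_sync > 0;
  }

  // Computes hash_fn(blobs[i]) for every i, splitting the work across
  // n_threads workers; each worker handles every n_threads-th element, so no
  // two threads ever write to the same slot of `hashes`.
  template<typename t_blob, typename t_hash, typename t_hash_fn>
  void example_hash_blocks_in_parallel(const std::vector<t_blob>& blobs, std::vector<t_hash>& hashes, t_hash_fn hash_fn, size_t n_threads)
  {
    hashes.resize(blobs.size());
    if(n_threads == 0)
      n_threads = 1;
    std::vector<std::thread> workers;
    for(size_t t = 0; t < n_threads; ++t)
    {
      workers.push_back(std::thread([&, t]() {
        for(size_t i = t; i < blobs.size(); i += n_threads)
          hashes[i] = hash_fn(blobs[i]);
      }));
    }
    for(size_t t = 0; t < workers.size(); ++t)
      workers[t].join();
  }
}
// Example use (again hypothetical): example_parse_db_sync_mode("fastest:async:1000", opts)
// yields durability == "fastest", async == true, blocks_per_sync == 1000.
// ---------------------------------------------------------------------------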