Mirror of https://codeberg.org/anoncontributorxmr/monero.git, synced 2024-11-23 10:37:37 -07:00
wallet2, RPC: Optimize RPC calls for periodic refresh from 3 down to 1 call [release-v0.18]
parent ab826008d6, commit 23f782b211
@ -1739,6 +1739,11 @@ namespace cryptonote
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::get_pool_info(time_t start_time, bool include_sensitive_txes, std::vector<tx_memory_pool::tx_details>& added_txs, std::vector<crypto::hash>& removed_txs, bool& incremental) const
{
return m_mempool.get_pool_info(start_time, include_sensitive_txes, added_txs, removed_txs, incremental);
}
//-----------------------------------------------------------------------------------------------
bool core::get_pool_transaction_stats(struct txpool_stats& stats, bool include_sensitive_data) const
{
m_mempool.get_transaction_stats(stats, include_sensitive_data);
@ -510,6 +510,14 @@ namespace cryptonote
bool get_pool_transaction_hashes(std::vector<crypto::hash>& txs, bool include_sensitive_txes = false) const;

/**
* @copydoc tx_memory_pool::get_pool_info
* @param include_sensitive_txes include private transactions
*
* @note see tx_memory_pool::get_pool_info
*/
bool get_pool_info(time_t start_time, bool include_sensitive_txes, std::vector<tx_memory_pool::tx_details>& added_txs, std::vector<crypto::hash>& removed_txs, bool& incremental) const;

/**
* @copydoc tx_memory_pool::get_transactions
* @param include_sensitive_txes include private transactions
*
@ -133,6 +133,12 @@ namespace cryptonote
// class code expects unsigned values throughout
if (m_next_check < time_t(0))
throw std::runtime_error{"Unexpected time_t (system clock) value"};

m_added_txs_start_time = (time_t)0;
m_removed_txs_start_time = (time_t)0;
// We don't set these to "now" already here as we don't know how long it takes from construction
// of the pool until it "goes to work". It's safer to set when the first actual txs enter the
// corresponding lists.
}
//---------------------------------------------------------------------------------
bool tx_memory_pool::add_tx(transaction &tx, /*const crypto::hash& tx_prefix_hash,*/ const crypto::hash &id, const cryptonote::blobdata &blob, size_t tx_weight, tx_verification_context& tvc, relay_method tx_relay, bool relayed, uint8_t version)
@ -292,7 +298,7 @@ namespace cryptonote
return false;

m_blockchain.add_txpool_tx(id, blob, meta);
m_txs_by_fee_and_receive_time.emplace(std::pair<double, std::time_t>(fee / (double)(tx_weight ? tx_weight : 1), receive_time), id);
add_tx_to_transient_lists(id, fee / (double)(tx_weight ? tx_weight : 1), receive_time);
lock.commit();
}
catch (const std::exception &e)
@ -363,7 +369,7 @@ namespace cryptonote

m_blockchain.remove_txpool_tx(id);
m_blockchain.add_txpool_tx(id, blob, meta);
m_txs_by_fee_and_receive_time.emplace(std::pair<double, std::time_t>(fee / (double)(tx_weight ? tx_weight : 1), receive_time), id);
add_tx_to_transient_lists(id, meta.fee / (double)(tx_weight ? tx_weight : 1), receive_time);
}
lock.commit();
}
@ -384,7 +390,7 @@ namespace cryptonote

++m_cookie;

MINFO("Transaction added to pool: txid " << id << " weight: " << tx_weight << " fee/byte: " << (fee / (double)(tx_weight ? tx_weight : 1)));
MINFO("Transaction added to pool: txid " << id << " weight: " << tx_weight << " fee/byte: " << (fee / (double)(tx_weight ? tx_weight : 1)) << ", count: " << m_added_txs_by_id.size());

prune(m_txpool_max_weight);
@ -475,7 +481,8 @@ namespace cryptonote
reduce_txpool_weight(meta.weight);
remove_transaction_keyimages(tx, txid);
MINFO("Pruned tx " << txid << " from txpool: weight: " << meta.weight << ", fee/byte: " << it->first.first);
m_txs_by_fee_and_receive_time.erase(it--);
remove_tx_from_transient_lists(it, txid, !meta.matches(relay_category::broadcasted));
it--;
changed = true;
}
catch (const std::exception &e)
@ -557,8 +564,7 @@ namespace cryptonote
CRITICAL_REGION_LOCAL(m_transactions_lock);
CRITICAL_REGION_LOCAL1(m_blockchain);

auto sorted_it = find_tx_in_sorted_container(id);

bool sensitive = false;
try
{
LockedTXN lock(m_blockchain.get_db());
@ -589,6 +595,7 @@ namespace cryptonote
do_not_relay = meta.do_not_relay;
double_spend_seen = meta.double_spend_seen;
pruned = meta.pruned;
sensitive = !meta.matches(relay_category::broadcasted);

// remove first, in case this throws, so key images aren't removed
m_blockchain.remove_txpool_tx(id);
@ -602,8 +609,7 @@ namespace cryptonote
return false;
}

if (sorted_it != m_txs_by_fee_and_receive_time.end())
m_txs_by_fee_and_receive_time.erase(sorted_it);
remove_tx_from_transient_lists(find_tx_in_sorted_container(id), id, sensitive);
++m_cookie;
return true;
}
@ -651,6 +657,7 @@ namespace cryptonote
td.relayed = meta.relayed;
td.do_not_relay = meta.do_not_relay;
td.double_spend_seen = meta.double_spend_seen;
td.sensitive = !meta.matches(relay_category::broadcasted);
}
catch (const std::exception &e)
{
@ -721,15 +728,7 @@ namespace cryptonote
(tx_age > CRYPTONOTE_MEMPOOL_TX_FROM_ALT_BLOCK_LIVETIME && meta.kept_by_block) )
{
LOG_PRINT_L1("Tx " << txid << " removed from tx pool due to outdated, age: " << tx_age );
auto sorted_it = find_tx_in_sorted_container(txid);
if (sorted_it == m_txs_by_fee_and_receive_time.end())
{
LOG_PRINT_L1("Removing tx " << txid << " from tx pool, but it was not found in the sorted txs container!");
}
else
{
m_txs_by_fee_and_receive_time.erase(sorted_it);
}
remove_tx_from_transient_lists(find_tx_in_sorted_container(txid), txid, !meta.matches(relay_category::broadcasted));
m_timed_out_transactions.insert(txid);
remove.push_back(std::make_pair(txid, meta.weight));
}
@ -883,9 +882,12 @@ namespace cryptonote
meta.last_relayed_time = std::chrono::system_clock::to_time_t(now);

m_blockchain.update_txpool_tx(hash, meta);

// wait until db update succeeds to ensure tx is visible in the pool
was_just_broadcasted = !already_broadcasted && meta.matches(relay_category::broadcasted);

if (was_just_broadcasted)
// Make sure the tx gets re-added with an updated time
add_tx_to_transient_lists(hash, meta.fee / (double)meta.weight, std::chrono::system_clock::to_time_t(now));
}
}
catch (const std::exception &e)
@ -938,6 +940,88 @@ namespace cryptonote
}, false, category);
}
//------------------------------------------------------------------
bool tx_memory_pool::get_pool_info(time_t start_time, bool include_sensitive, std::vector<tx_details>& added_txs, std::vector<crypto::hash>& removed_txs, bool& incremental) const
{
CRITICAL_REGION_LOCAL(m_transactions_lock);
CRITICAL_REGION_LOCAL1(m_blockchain);

incremental = true;
if (start_time == (time_t)0)
{
// Giving no start time means give back whole pool
incremental = false;
}
else if ((m_added_txs_start_time != (time_t)0) && (m_removed_txs_start_time != (time_t)0))
{
if ((start_time <= m_added_txs_start_time) || (start_time <= m_removed_txs_start_time))
{
// If either of the two lists do not go back far enough it's not possible to
// deliver incremental pool info
incremental = false;
}
// The check uses "<=": We cannot be sure to have ALL txs exactly at start_time, only AFTER that time
}
else
{
// Some incremental info still missing completely
incremental = false;
}

added_txs.clear();
removed_txs.clear();

if (!incremental)
{
// Give back the whole pool in 'added_txs'; because calling 'get_transaction_info' right inside the
// anonymous method somehow results in an LMDB error with transactions we have to build a list of
// ids first and get the full info afterwards
std::vector<crypto::hash> txids;
const relay_category category = include_sensitive ? relay_category::all : relay_category::broadcasted;
m_blockchain.for_all_txpool_txes([&txids](const crypto::hash &txid, const txpool_tx_meta_t &meta, const cryptonote::blobdata_ref *bd){
txids.push_back(txid);
return true;
}, false, category);
tx_details details;
for (const auto &it: txids)
{
bool success = get_transaction_info(it, details);
if (success)
{
added_txs.push_back(std::move(details));
}
}
return true;
}

// Give back incrementally, based on time of entry into the map
tx_details details;
for (const auto &pit : m_added_txs_by_id)
{
if (pit.second >= start_time)
{
bool success = get_transaction_info(pit.first, details);
if (success)
{
if (include_sensitive || !details.sensitive)
{
added_txs.push_back(std::move(details));
}
}
}
}

std::multimap<time_t, removed_tx_info>::const_iterator rit = m_removed_txs_by_time.lower_bound(start_time);
while (rit != m_removed_txs_by_time.end())
{
if (include_sensitive || !rit->second.sensitive)
{
removed_txs.push_back(rit->second.txid);
}
++rit;
}
return true;
}
//------------------------------------------------------------------
void tx_memory_pool::get_transaction_backlog(std::vector<tx_backlog_entry>& backlog, bool include_sensitive) const
{
CRITICAL_REGION_LOCAL(m_transactions_lock);
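
The incremental contract of the new get_pool_info is easiest to see from a caller's side: pass the time remembered from the previous poll, and treat incremental == false as a full snapshot that replaces the local view. A minimal, hypothetical in-process caller is sketched below; the get_pool_info signature is the one added above, while poll_pool and last_query_time are illustrative names only, and the Monero tree headers (e.g. cryptonote_core/tx_pool.h) are assumed to be available.

// Sketch only: a hypothetical direct caller of tx_memory_pool::get_pool_info.
void poll_pool(cryptonote::tx_memory_pool &pool, time_t &last_query_time)
{
  std::vector<cryptonote::tx_memory_pool::tx_details> added;
  std::vector<crypto::hash> removed;
  bool incremental = false;

  // Remember "now" before the call, not after: a slightly overlapping interval
  // only causes duplicate delivery, a gap would lose txs.
  const time_t query_start = time(NULL);

  // Passing 0 (or a time older than what the pool tracks) yields the whole pool.
  if (!pool.get_pool_info(last_query_time, /*include_sensitive=*/false, added, removed, incremental))
    return;

  if (!incremental)
  {
    // Full snapshot: 'added' holds every pool tx and 'removed' stays empty;
    // the caller should rebuild its view from scratch here.
  }
  else
  {
    // Apply 'removed' before 'added': the same txid can legitimately appear in
    // both lists (see the "resurrected" txs comment in add_tx_to_transient_lists).
  }
  last_query_time = query_start;   // starting point for the next incremental poll
}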
@ -1642,6 +1726,12 @@ namespace cryptonote
CRITICAL_REGION_LOCAL(m_transactions_lock);
CRITICAL_REGION_LOCAL1(m_blockchain);

// Simply throw away incremental info, too difficult to update
m_added_txs_by_id.clear();
m_added_txs_start_time = (time_t)0;
m_removed_txs_by_time.clear();
m_removed_txs_start_time = (time_t)0;

MINFO("Validating txpool contents for v" << (unsigned)version);

LockedTXN lock(m_blockchain.get_db());
@ -1699,6 +1789,106 @@ namespace cryptonote
return n_removed;
}
//---------------------------------------------------------------------------------
void tx_memory_pool::add_tx_to_transient_lists(const crypto::hash& txid, double fee, time_t receive_time)
{

time_t now = time(NULL);
const std::unordered_map<crypto::hash, time_t>::iterator it = m_added_txs_by_id.find(txid);
if (it == m_added_txs_by_id.end())
{
m_added_txs_by_id.insert(std::make_pair(txid, now));
}
else
{
// This tx was already added to the map earlier, probably because then it was in the "stem"
// phase of Dandelion++ and now is in the "fluff" phase i.e. got broadcasted: We have to set
// a new time for clients that are not allowed to see sensitive txs to make sure they will
// see it now if they query incrementally
it->second = now;

auto sorted_it = find_tx_in_sorted_container(txid);
if (sorted_it == m_txs_by_fee_and_receive_time.end())
{
MERROR("Re-adding tx " << txid << " to tx pool, but it was not found in the sorted txs container");
}
else
{
m_txs_by_fee_and_receive_time.erase(sorted_it);
}
}
m_txs_by_fee_and_receive_time.emplace(std::pair<double, time_t>(fee, receive_time), txid);

// Don't check for "resurrected" txs in case of reorgs i.e. don't check in 'm_removed_txs_by_time'
// whether we have that txid there and if yes remove it; this results in possible duplicates
// where we return certain txids as deleted AND in the pool at the same time which requires
// clients to process deleted ones BEFORE processing pool txs
if (m_added_txs_start_time == (time_t)0)
{
m_added_txs_start_time = now;
}
}
//---------------------------------------------------------------------------------
void tx_memory_pool::remove_tx_from_transient_lists(const cryptonote::sorted_tx_container::iterator& sorted_it, const crypto::hash& txid, bool sensitive)
{
if (sorted_it == m_txs_by_fee_and_receive_time.end())
{
LOG_PRINT_L1("Removing tx " << txid << " from tx pool, but it was not found in the sorted txs container!");
}
else
{
m_txs_by_fee_and_receive_time.erase(sorted_it);
}

const std::unordered_map<crypto::hash, time_t>::iterator it = m_added_txs_by_id.find(txid);
if (it != m_added_txs_by_id.end())
{
m_added_txs_by_id.erase(it);
}
else
{
MDEBUG("Removing tx " << txid << " from tx pool, but it was not found in the map of added txs");
}
track_removed_tx(txid, sensitive);
}
//---------------------------------------------------------------------------------
void tx_memory_pool::track_removed_tx(const crypto::hash& txid, bool sensitive)
{
time_t now = time(NULL);
m_removed_txs_by_time.insert(std::make_pair(now, removed_tx_info{txid, sensitive}));
MDEBUG("Transaction removed from pool: txid " << txid << ", total entries in removed list now " << m_removed_txs_by_time.size());
if (m_removed_txs_start_time == (time_t)0)
{
m_removed_txs_start_time = now;
}

// Simple system to make sure the list of removed ids does not swell to an unmanageable size: Set
// an absolute size limit plus delete entries that are x minutes old (which is ok because clients
// will sync with sensible time intervalls and should not ask for incremental info e.g. 1 hour back)
const int MAX_REMOVED = 20000;
if (m_removed_txs_by_time.size() > MAX_REMOVED)
{
auto erase_it = m_removed_txs_by_time.begin();
std::advance(erase_it, MAX_REMOVED / 4 + 1);
m_removed_txs_by_time.erase(m_removed_txs_by_time.begin(), erase_it);
m_removed_txs_start_time = m_removed_txs_by_time.begin()->first;
MDEBUG("Erased old transactions from big removed list, leaving " << m_removed_txs_by_time.size());
}
else
{
time_t earliest = now - (30 * 60); // 30 minutes
std::map<time_t, removed_tx_info>::iterator from, to;
from = m_removed_txs_by_time.begin();
to = m_removed_txs_by_time.lower_bound(earliest);
int distance = std::distance(from, to);
if (distance > 0)
{
m_removed_txs_by_time.erase(from, to);
m_removed_txs_start_time = earliest;
MDEBUG("Erased " << distance << " old transactions from removed list, leaving " << m_removed_txs_by_time.size());
}
}
}
//---------------------------------------------------------------------------------
bool tx_memory_pool::init(size_t max_txpool_weight, bool mine_stem_txes)
{
CRITICAL_REGION_LOCAL(m_transactions_lock);
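
The size control applied by track_removed_tx above (a hard cap of 20000 entries, otherwise a 30-minute age window) can be tried out in isolation. The following standalone sketch reproduces the same policy on a plain std::multimap<time_t, int> instead of the pool's removed-tx map; all names here are illustrative, not part of the commit.

#include <ctime>
#include <iostream>
#include <iterator>
#include <map>

// Standalone illustration of the trimming policy used by track_removed_tx:
// hard-cap the container, otherwise drop only entries older than 30 minutes.
static void trim_removed_list(std::multimap<time_t, int> &removed, time_t &start_time, time_t now)
{
  const size_t MAX_REMOVED = 20000;
  if (removed.size() > MAX_REMOVED)
  {
    // Over the hard cap: drop roughly the oldest quarter in one go.
    auto erase_it = removed.begin();
    std::advance(erase_it, MAX_REMOVED / 4 + 1);
    removed.erase(removed.begin(), erase_it);
    start_time = removed.begin()->first;
  }
  else
  {
    // Under the cap: drop entries older than the 30-minute window, if any.
    const time_t earliest = now - 30 * 60;
    auto to = removed.lower_bound(earliest);
    if (to != removed.begin())
    {
      removed.erase(removed.begin(), to);
      start_time = earliest;
    }
  }
}

int main()
{
  std::multimap<time_t, int> removed;
  time_t start_time = 0, now = time(nullptr);
  for (int i = 0; i < 100; ++i)
    removed.emplace(now - i * 60, i);   // one fake removal per minute, going back 100 minutes
  trim_removed_list(removed, start_time, now);
  std::cout << removed.size() << " entries left, window starts at " << start_time << '\n';
}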
@ -1706,6 +1896,10 @@ namespace cryptonote

m_txpool_max_weight = max_txpool_weight ? max_txpool_weight : DEFAULT_TXPOOL_MAX_WEIGHT;
m_txs_by_fee_and_receive_time.clear();
m_added_txs_by_id.clear();
m_added_txs_start_time = (time_t)0;
m_removed_txs_by_time.clear();
m_removed_txs_start_time = (time_t)0;
m_spent_key_images.clear();
m_txpool_weight = 0;
std::vector<crypto::hash> remove;
@ -1730,7 +1924,7 @@ namespace cryptonote
MFATAL("Failed to insert key images from txpool tx");
return false;
}
m_txs_by_fee_and_receive_time.emplace(std::pair<double, time_t>(meta.fee / (double)meta.weight, meta.receive_time), txid);
add_tx_to_transient_lists(txid, meta.fee / (double)meta.weight, meta.receive_time);
m_txpool_weight += meta.weight;
return true;
}, true, relay_category::all);
@ -461,6 +461,7 @@ namespace cryptonote
bool do_not_relay; //!< to avoid relay this transaction to the network

bool double_spend_seen; //!< true iff another tx was seen double spending this one
bool sensitive;
};

/**
@ -473,6 +474,13 @@ namespace cryptonote
*/
bool get_complement(const std::vector<crypto::hash> &hashes, std::vector<cryptonote::blobdata> &txes) const;

/**
* @brief get info necessary for update of pool-related info in a wallet, preferably incremental
*
* @return true on success, false on error
*/
bool get_pool_info(time_t start_time, bool include_sensitive, std::vector<tx_details>& added_txs, std::vector<crypto::hash>& removed_txs, bool& incremental) const;

private:

/**
@ -577,6 +585,10 @@ namespace cryptonote
*/
void prune(size_t bytes = 0);

void add_tx_to_transient_lists(const crypto::hash& txid, double fee, time_t receive_time);
void remove_tx_from_transient_lists(const cryptonote::sorted_tx_container::iterator& sorted_it, const crypto::hash& txid, bool sensitive);
void track_removed_tx(const crypto::hash& txid, bool sensitive);

//TODO: confirm the below comments and investigate whether or not this
// is the desired behavior
//! map key images to transactions which spent them
@ -609,6 +621,26 @@ private:

std::atomic<uint64_t> m_cookie; //!< incremented at each change

// Info when transactions entered the pool, accessible by txid
std::unordered_map<crypto::hash, time_t> m_added_txs_by_id;

// Info at what time the pool started to track the adding of transactions
time_t m_added_txs_start_time;

struct removed_tx_info
{
crypto::hash txid;
bool sensitive;
};

// Info about transactions that were removed from the pool, ordered by the time
// of deletion
std::multimap<time_t, removed_tx_info> m_removed_txs_by_time;

// Info how far back in time the list of removed tx ids currently reaches
// (it gets shorted periodically to prevent overflow)
time_t m_removed_txs_start_time;

/**
* @brief get an iterator to a transaction in the sorted container
*
@ -598,88 +598,162 @@ namespace cryptonote
|
||||
|
||||
CHECK_PAYMENT(req, res, 1);
|
||||
|
||||
// quick check for noop
|
||||
if (!req.block_ids.empty())
|
||||
res.daemon_time = (uint64_t)time(NULL);
|
||||
// Always set daemon time, and set it early rather than late, as delivering some incremental pool
|
||||
// info twice because of slightly overlapping time intervals is no problem, whereas producing gaps
|
||||
// and never delivering something is
|
||||
|
||||
bool get_blocks = false;
|
||||
bool get_pool = false;
|
||||
switch (req.requested_info)
|
||||
{
|
||||
uint64_t last_block_height;
|
||||
crypto::hash last_block_hash;
|
||||
m_core.get_blockchain_top(last_block_height, last_block_hash);
|
||||
if (last_block_hash == req.block_ids.front())
|
||||
case COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_ONLY:
|
||||
// Compatibility value 0: Clients that do not set 'requested_info' want blocks, and only blocks
|
||||
get_blocks = true;
|
||||
break;
|
||||
case COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_AND_POOL:
|
||||
get_blocks = true;
|
||||
get_pool = true;
|
||||
break;
|
||||
case COMMAND_RPC_GET_BLOCKS_FAST::POOL_ONLY:
|
||||
get_pool = true;
|
||||
break;
|
||||
default:
|
||||
res.status = "Failed, wrong requested info";
|
||||
return true;
|
||||
}
|
||||
|
||||
res.pool_info_extent = COMMAND_RPC_GET_BLOCKS_FAST::NONE;
|
||||
|
||||
if (get_pool)
|
||||
{
|
||||
const bool restricted = m_restricted && ctx;
|
||||
const bool request_has_rpc_origin = ctx != NULL;
|
||||
const bool allow_sensitive = !request_has_rpc_origin || !restricted;
|
||||
|
||||
bool incremental;
|
||||
std::vector<tx_memory_pool::tx_details> added_pool_txs;
|
||||
bool success = m_core.get_pool_info((time_t)req.pool_info_since, allow_sensitive, added_pool_txs, res.removed_pool_txids, incremental);
|
||||
if (success)
|
||||
{
|
||||
res.start_height = 0;
|
||||
res.current_height = m_core.get_current_blockchain_height();
|
||||
res.status = CORE_RPC_STATUS_OK;
|
||||
res.added_pool_txs.clear();
|
||||
if (m_rpc_payment)
|
||||
{
|
||||
CHECK_PAYMENT_SAME_TS(req, res, added_pool_txs.size() * COST_PER_TX + res.removed_pool_txids.size() * COST_PER_POOL_HASH);
|
||||
}
|
||||
for (auto tx_detail: added_pool_txs)
|
||||
{
|
||||
COMMAND_RPC_GET_BLOCKS_FAST::pool_tx_info info;
|
||||
info.tx_hash = cryptonote::get_transaction_hash(tx_detail.tx);
|
||||
std::stringstream oss;
|
||||
binary_archive<true> ar(oss);
|
||||
bool r = ::serialization::serialize(ar, tx_detail.tx);
|
||||
if (!r)
|
||||
{
|
||||
res.status = "Failed to serialize transaction";
|
||||
return true;
|
||||
}
|
||||
info.tx_blob = oss.str();
|
||||
info.double_spend_seen = tx_detail.double_spend_seen;
|
||||
res.added_pool_txs.push_back(std::move(info));
|
||||
}
|
||||
}
|
||||
if (success)
|
||||
{
|
||||
res.pool_info_extent = incremental ? COMMAND_RPC_GET_BLOCKS_FAST::INCREMENTAL : COMMAND_RPC_GET_BLOCKS_FAST::FULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
res.status = "Failed to get pool info";
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t max_blocks = COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT;
|
||||
if (m_rpc_payment)
|
||||
if (get_blocks)
|
||||
{
|
||||
max_blocks = res.credits / COST_PER_BLOCK;
|
||||
if (max_blocks > COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT)
|
||||
max_blocks = COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT;
|
||||
if (max_blocks == 0)
|
||||
// quick check for noop
|
||||
if (!req.block_ids.empty())
|
||||
{
|
||||
res.status = CORE_RPC_STATUS_PAYMENT_REQUIRED;
|
||||
uint64_t last_block_height;
|
||||
crypto::hash last_block_hash;
|
||||
m_core.get_blockchain_top(last_block_height, last_block_hash);
|
||||
if (last_block_hash == req.block_ids.front())
|
||||
{
|
||||
res.start_height = 0;
|
||||
res.current_height = last_block_height + 1;
|
||||
res.status = CORE_RPC_STATUS_OK;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t max_blocks = COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT;
|
||||
if (m_rpc_payment)
|
||||
{
|
||||
max_blocks = res.credits / COST_PER_BLOCK;
|
||||
if (max_blocks > COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT)
|
||||
max_blocks = COMMAND_RPC_GET_BLOCKS_FAST_MAX_BLOCK_COUNT;
|
||||
if (max_blocks == 0)
|
||||
{
|
||||
res.status = CORE_RPC_STATUS_PAYMENT_REQUIRED;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::pair<cryptonote::blobdata, crypto::hash>, std::vector<std::pair<crypto::hash, cryptonote::blobdata> > > > bs;
|
||||
if(!m_core.find_blockchain_supplement(req.start_height, req.block_ids, bs, res.current_height, res.start_height, req.prune, !req.no_miner_tx, max_blocks, COMMAND_RPC_GET_BLOCKS_FAST_MAX_TX_COUNT))
|
||||
{
|
||||
res.status = "Failed";
|
||||
add_host_fail(ctx);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::pair<cryptonote::blobdata, crypto::hash>, std::vector<std::pair<crypto::hash, cryptonote::blobdata> > > > bs;
|
||||
if(!m_core.find_blockchain_supplement(req.start_height, req.block_ids, bs, res.current_height, res.start_height, req.prune, !req.no_miner_tx, max_blocks, COMMAND_RPC_GET_BLOCKS_FAST_MAX_TX_COUNT))
|
||||
{
|
||||
res.status = "Failed";
|
||||
add_host_fail(ctx);
|
||||
return true;
|
||||
}
|
||||
CHECK_PAYMENT_SAME_TS(req, res, bs.size() * COST_PER_BLOCK);
|
||||
|
||||
CHECK_PAYMENT_SAME_TS(req, res, bs.size() * COST_PER_BLOCK);
|
||||
|
||||
size_t size = 0, ntxes = 0;
|
||||
res.blocks.reserve(bs.size());
|
||||
res.output_indices.reserve(bs.size());
|
||||
for(auto& bd: bs)
|
||||
{
|
||||
res.blocks.resize(res.blocks.size()+1);
|
||||
res.blocks.back().pruned = req.prune;
|
||||
res.blocks.back().block = bd.first.first;
|
||||
size += bd.first.first.size();
|
||||
res.output_indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices());
|
||||
ntxes += bd.second.size();
|
||||
res.output_indices.back().indices.reserve(1 + bd.second.size());
|
||||
if (req.no_miner_tx)
|
||||
res.output_indices.back().indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::tx_output_indices());
|
||||
res.blocks.back().txs.reserve(bd.second.size());
|
||||
for (std::vector<std::pair<crypto::hash, cryptonote::blobdata>>::iterator i = bd.second.begin(); i != bd.second.end(); ++i)
|
||||
size_t size = 0, ntxes = 0;
|
||||
res.blocks.reserve(bs.size());
|
||||
res.output_indices.reserve(bs.size());
|
||||
for(auto& bd: bs)
|
||||
{
|
||||
res.blocks.back().txs.push_back({std::move(i->second), crypto::null_hash});
|
||||
i->second.clear();
|
||||
i->second.shrink_to_fit();
|
||||
size += res.blocks.back().txs.back().blob.size();
|
||||
}
|
||||
res.blocks.resize(res.blocks.size()+1);
|
||||
res.blocks.back().pruned = req.prune;
|
||||
res.blocks.back().block = bd.first.first;
|
||||
size += bd.first.first.size();
|
||||
res.output_indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices());
|
||||
ntxes += bd.second.size();
|
||||
res.output_indices.back().indices.reserve(1 + bd.second.size());
|
||||
if (req.no_miner_tx)
|
||||
res.output_indices.back().indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::tx_output_indices());
|
||||
res.blocks.back().txs.reserve(bd.second.size());
|
||||
for (std::vector<std::pair<crypto::hash, cryptonote::blobdata>>::iterator i = bd.second.begin(); i != bd.second.end(); ++i)
|
||||
{
|
||||
res.blocks.back().txs.push_back({std::move(i->second), crypto::null_hash});
|
||||
i->second.clear();
|
||||
i->second.shrink_to_fit();
|
||||
size += res.blocks.back().txs.back().blob.size();
|
||||
}
|
||||
|
||||
const size_t n_txes_to_lookup = bd.second.size() + (req.no_miner_tx ? 0 : 1);
|
||||
if (n_txes_to_lookup > 0)
|
||||
{
|
||||
std::vector<std::vector<uint64_t>> indices;
|
||||
bool r = m_core.get_tx_outputs_gindexs(req.no_miner_tx ? bd.second.front().first : bd.first.second, n_txes_to_lookup, indices);
|
||||
if (!r)
|
||||
const size_t n_txes_to_lookup = bd.second.size() + (req.no_miner_tx ? 0 : 1);
|
||||
if (n_txes_to_lookup > 0)
|
||||
{
|
||||
res.status = "Failed";
|
||||
return true;
|
||||
std::vector<std::vector<uint64_t>> indices;
|
||||
bool r = m_core.get_tx_outputs_gindexs(req.no_miner_tx ? bd.second.front().first : bd.first.second, n_txes_to_lookup, indices);
|
||||
if (!r)
|
||||
{
|
||||
res.status = "Failed";
|
||||
return true;
|
||||
}
|
||||
if (indices.size() != n_txes_to_lookup || res.output_indices.back().indices.size() != (req.no_miner_tx ? 1 : 0))
|
||||
{
|
||||
res.status = "Failed";
|
||||
return true;
|
||||
}
|
||||
for (size_t i = 0; i < indices.size(); ++i)
|
||||
res.output_indices.back().indices.push_back({std::move(indices[i])});
|
||||
}
|
||||
if (indices.size() != n_txes_to_lookup || res.output_indices.back().indices.size() != (req.no_miner_tx ? 1 : 0))
|
||||
{
|
||||
res.status = "Failed";
|
||||
return true;
|
||||
}
|
||||
for (size_t i = 0; i < indices.size(); ++i)
|
||||
res.output_indices.back().indices.push_back({std::move(indices[i])});
|
||||
}
|
||||
MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, size " << size);
|
||||
}
|
||||
|
||||
MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, size " << size);
|
||||
res.status = CORE_RPC_STATUS_OK;
|
||||
return true;
|
||||
}
|
||||
|
@ -162,18 +162,29 @@ namespace cryptonote
struct COMMAND_RPC_GET_BLOCKS_FAST
{

enum REQUESTED_INFO
{
BLOCKS_ONLY = 0,
BLOCKS_AND_POOL = 1,
POOL_ONLY = 2
};

struct request_t: public rpc_access_request_base
{
uint8_t requested_info;
std::list<crypto::hash> block_ids; //*first 10 blocks id goes sequential, next goes in pow(2,n) offset, like 2, 4, 8, 16, 32, 64 and so on, and the last one is always genesis block */
uint64_t start_height;
bool prune;
bool no_miner_tx;
uint64_t pool_info_since;
BEGIN_KV_SERIALIZE_MAP()
KV_SERIALIZE_PARENT(rpc_access_request_base)
KV_SERIALIZE_OPT(requested_info, (uint8_t)0)
KV_SERIALIZE_CONTAINER_POD_AS_BLOB(block_ids)
KV_SERIALIZE(start_height)
KV_SERIALIZE(prune)
KV_SERIALIZE_OPT(no_miner_tx, false)
KV_SERIALIZE_OPT(pool_info_since, (uint64_t)0)
END_KV_SERIALIZE_MAP()
};
typedef epee::misc_utils::struct_init<request_t> request;
@ -196,12 +207,36 @@ namespace cryptonote
END_KV_SERIALIZE_MAP()
};

struct pool_tx_info
{
crypto::hash tx_hash;
blobdata tx_blob;
bool double_spend_seen;

BEGIN_KV_SERIALIZE_MAP()
KV_SERIALIZE_VAL_POD_AS_BLOB(tx_hash)
KV_SERIALIZE(tx_blob)
KV_SERIALIZE(double_spend_seen)
END_KV_SERIALIZE_MAP()
};

enum POOL_INFO_EXTENT
{
NONE = 0,
INCREMENTAL = 1,
FULL = 2
};

struct response_t: public rpc_access_response_base
{
std::vector<block_complete_entry> blocks;
uint64_t start_height;
uint64_t current_height;
std::vector<block_output_indices> output_indices;
uint64_t daemon_time;
uint8_t pool_info_extent;
std::vector<pool_tx_info> added_pool_txs;
std::vector<crypto::hash> removed_pool_txids;

BEGIN_KV_SERIALIZE_MAP()
KV_SERIALIZE_PARENT(rpc_access_response_base)
@ -209,6 +244,10 @@ namespace cryptonote
KV_SERIALIZE(start_height)
KV_SERIALIZE(current_height)
KV_SERIALIZE(output_indices)
KV_SERIALIZE(daemon_time)
KV_SERIALIZE(pool_info_extent)
KV_SERIALIZE(added_pool_txs)
KV_SERIALIZE_CONTAINER_POD_AS_BLOB(removed_pool_txids)
END_KV_SERIALIZE_MAP()
};
typedef epee::misc_utils::struct_init<response_t> response;
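
Taken together, these new fields define the incremental pool protocol of the /getblocks.bin endpoint: the client remembers the daemon_time of its last reply, sends it back as pool_info_since, and uses pool_info_extent to decide between a full rebuild and an incremental update. A condensed, hypothetical client-side sketch follows; the request/response types and the invoke_http_bin call are the ones used elsewhere in this diff, while the helper itself and its error handling are illustrative only.

// Needs rpc/core_rpc_server_commands_defs.h, net/http_client.h and
// storages/http_abstract_invoke.h from the Monero tree.
bool poll_pool_changes(epee::net_utils::http::http_simple_client &client, uint64_t &since)
{
  cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::request req = AUTO_VAL_INIT(req);
  cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::response res = AUTO_VAL_INIT(res);

  req.requested_info = cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::POOL_ONLY;
  req.pool_info_since = since;   // 0 on the very first call -> full pool

  if (!epee::net_utils::invoke_http_bin("/getblocks.bin", req, res, client, std::chrono::seconds(10)))
    return false;

  since = res.daemon_time;       // remember the daemon's clock, not the local one
  if (res.pool_info_extent == cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::FULL)
  {
    // Daemon could not answer incrementally: res.added_pool_txs is the whole pool,
    // so the local view has to be rebuilt from it.
  }
  else if (res.pool_info_extent == cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::INCREMENTAL)
  {
    // Process res.removed_pool_txids before res.added_pool_txs; the same txid may
    // legitimately appear in both lists.
  }
  // NONE means the reply carries no pool data at all.
  return true;
}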
@ -5895,7 +5895,10 @@ bool simple_wallet::refresh_main(uint64_t start_height, enum ResetType reset, bo
{
m_in_manual_refresh.store(true, std::memory_order_relaxed);
epee::misc_utils::auto_scope_leave_caller scope_exit_handler = epee::misc_utils::create_scope_leave_handler([&](){m_in_manual_refresh.store(false, std::memory_order_relaxed);});
m_wallet->refresh(m_wallet->is_trusted_daemon(), start_height, fetched_blocks, received_money);
// For manual refresh don't allow incremental checking of the pool: Because we did not process the txs
// for us in the pool during automatic refresh we could miss some of them if we checked the pool
// incrementally here
m_wallet->refresh(m_wallet->is_trusted_daemon(), start_height, fetched_blocks, received_money, true, false);

if (reset == ResetSoftKeepKI)
{
@ -1225,6 +1225,7 @@ wallet2::wallet2(network_type nettype, uint64_t kdf_rounds, bool unattended, std
|
||||
m_load_deprecated_formats(false),
|
||||
m_credits_target(0),
|
||||
m_enable_multisig(false),
|
||||
m_pool_info_query_time(0),
|
||||
m_has_ever_refreshed_from_node(false),
|
||||
m_allow_mismatched_daemon_version(false)
|
||||
{
|
||||
@ -2939,7 +2940,7 @@ void wallet2::parse_block_round(const cryptonote::blobdata &blob, cryptonote::bl
|
||||
error = !cryptonote::parse_and_validate_block_from_blob(blob, bl, bl_id);
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
void wallet2::pull_blocks(uint64_t start_height, uint64_t &blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices> &o_indices, uint64_t ¤t_height)
|
||||
void wallet2::pull_blocks(bool first, bool try_incremental, uint64_t start_height, uint64_t &blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices> &o_indices, uint64_t ¤t_height)
|
||||
{
|
||||
cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::request req = AUTO_VAL_INIT(req);
|
||||
cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::response res = AUTO_VAL_INIT(res);
|
||||
@ -2951,6 +2952,10 @@ void wallet2::pull_blocks(uint64_t start_height, uint64_t &blocks_start_height,
|
||||
req.start_height = start_height;
|
||||
req.no_miner_tx = m_refresh_type == RefreshNoCoinbase;
|
||||
|
||||
req.requested_info = first ? COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_AND_POOL : COMMAND_RPC_GET_BLOCKS_FAST::BLOCKS_ONLY;
|
||||
if (try_incremental)
|
||||
req.pool_info_since = m_pool_info_query_time;
|
||||
|
||||
{
|
||||
const boost::lock_guard<boost::recursive_mutex> lock{m_daemon_rpc_mutex};
|
||||
uint64_t pre_call_credits = m_rpc_payment_state.credits;
|
||||
@ -2960,16 +2965,36 @@ void wallet2::pull_blocks(uint64_t start_height, uint64_t &blocks_start_height,
|
||||
THROW_WALLET_EXCEPTION_IF(res.blocks.size() != res.output_indices.size(), error::wallet_internal_error,
|
||||
"mismatched blocks (" + boost::lexical_cast<std::string>(res.blocks.size()) + ") and output_indices (" +
|
||||
boost::lexical_cast<std::string>(res.output_indices.size()) + ") sizes from daemon");
|
||||
check_rpc_cost("/getblocks.bin", res.credits, pre_call_credits, 1 + res.blocks.size() * COST_PER_BLOCK);
|
||||
uint64_t pool_info_cost = res.added_pool_txs.size() * COST_PER_TX + res.removed_pool_txids.size() * COST_PER_POOL_HASH;
|
||||
check_rpc_cost("/getblocks.bin", res.credits, pre_call_credits, 1 + res.blocks.size() * COST_PER_BLOCK + pool_info_cost);
|
||||
}
|
||||
|
||||
blocks_start_height = res.start_height;
|
||||
blocks = std::move(res.blocks);
|
||||
o_indices = std::move(res.output_indices);
|
||||
current_height = res.current_height;
|
||||
if (res.pool_info_extent != COMMAND_RPC_GET_BLOCKS_FAST::NONE)
|
||||
m_pool_info_query_time = res.daemon_time;
|
||||
|
||||
MDEBUG("Pulled blocks: blocks_start_height " << blocks_start_height << ", count " << blocks.size()
|
||||
<< ", height " << blocks_start_height + blocks.size() << ", node height " << res.current_height);
|
||||
<< ", height " << blocks_start_height + blocks.size() << ", node height " << res.current_height
|
||||
<< ", pool info " << static_cast<unsigned int>(res.pool_info_extent));
|
||||
|
||||
if (first)
|
||||
{
|
||||
if (res.pool_info_extent != COMMAND_RPC_GET_BLOCKS_FAST::NONE)
|
||||
{
|
||||
update_pool_state_from_pool_data(res, m_process_pool_txs, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
// If we did not get any pool info, neither incremental nor the whole pool, we probably talk
|
||||
// to a daemon that does not yet support giving back pool info with the 'getblocks' call,
|
||||
// and we have to update in the "old way"
|
||||
update_pool_state_by_pool_query(m_process_pool_txs, true);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
void wallet2::pull_hashes(uint64_t start_height, uint64_t &blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<crypto::hash> &hashes)
|
||||
@ -3219,7 +3244,7 @@ void check_block_hard_fork_version(cryptonote::network_type nettype, uint8_t hf_
|
||||
daemon_is_outdated = height < start_height || height >= end_height;
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
void wallet2::pull_and_parse_next_blocks(uint64_t start_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, const std::vector<cryptonote::block_complete_entry> &prev_blocks, const std::vector<parsed_block> &prev_parsed_blocks, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<parsed_block> &parsed_blocks, bool &last, bool &error, std::exception_ptr &exception)
|
||||
void wallet2::pull_and_parse_next_blocks(bool first, bool try_incremental, uint64_t start_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, const std::vector<cryptonote::block_complete_entry> &prev_blocks, const std::vector<parsed_block> &prev_parsed_blocks, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<parsed_block> &parsed_blocks, bool &last, bool &error, std::exception_ptr &exception)
|
||||
{
|
||||
error = false;
|
||||
last = false;
|
||||
@ -3241,7 +3266,7 @@ void wallet2::pull_and_parse_next_blocks(uint64_t start_height, uint64_t &blocks
|
||||
// pull the new blocks
|
||||
std::vector<cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices> o_indices;
|
||||
uint64_t current_height;
|
||||
pull_blocks(start_height, blocks_start_height, short_chain_history, blocks, o_indices, current_height);
|
||||
pull_blocks(first, try_incremental, start_height, blocks_start_height, short_chain_history, blocks, o_indices, current_height);
|
||||
THROW_WALLET_EXCEPTION_IF(blocks.size() != o_indices.size(), error::wallet_internal_error, "Mismatched sizes of blocks and o_indices");
|
||||
|
||||
tools::threadpool& tpool = tools::threadpool::getInstanceForCompute();
|
||||
@ -3305,9 +3330,10 @@ void wallet2::pull_and_parse_next_blocks(uint64_t start_height, uint64_t &blocks
|
||||
}
|
||||
}
|
||||
|
||||
void wallet2::remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashes)
|
||||
void wallet2::remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashes, bool remove_if_found)
|
||||
{
|
||||
// remove pool txes to us that aren't in the pool anymore
|
||||
// remove pool txes to us that aren't in the pool anymore (remove_if_found = false),
|
||||
// or remove pool txes to us that were reported as removed (remove_if_found = true)
|
||||
std::unordered_multimap<crypto::hash, wallet2::pool_payment_details>::iterator uit = m_unconfirmed_payments.begin();
|
||||
while (uit != m_unconfirmed_payments.end())
|
||||
{
|
||||
@ -3322,9 +3348,9 @@ void wallet2::remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashe
|
||||
}
|
||||
}
|
||||
auto pit = uit++;
|
||||
if (!found)
|
||||
if ((!remove_if_found && !found) || (remove_if_found && found))
|
||||
{
|
||||
MDEBUG("Removing " << txid << " from unconfirmed payments, not found in pool");
|
||||
MDEBUG("Removing " << txid << " from unconfirmed payments");
|
||||
m_unconfirmed_payments.erase(pit);
|
||||
if (0 != m_callback)
|
||||
m_callback->on_pool_tx_removed(txid);
|
||||
@ -3333,9 +3359,183 @@ void wallet2::remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashe
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
void wallet2::update_pool_state(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed)
|
||||
// Code that is common to 'update_pool_state_by_pool_query' and 'update_pool_state_from_pool_data':
|
||||
// Check wether a tx in the pool is worthy of processing because we did not see it
|
||||
// yet or because it is "interesting" out of special circumstances
|
||||
bool wallet2::accept_pool_tx_for_processing(const crypto::hash &txid)
|
||||
{
|
||||
MTRACE("update_pool_state start");
|
||||
bool txid_found_in_up = false;
|
||||
for (const auto &up: m_unconfirmed_payments)
|
||||
{
|
||||
if (up.second.m_pd.m_tx_hash == txid)
|
||||
{
|
||||
txid_found_in_up = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (m_scanned_pool_txs[0].find(txid) != m_scanned_pool_txs[0].end() || m_scanned_pool_txs[1].find(txid) != m_scanned_pool_txs[1].end())
|
||||
{
|
||||
// if it's for us, we want to keep track of whether we saw a double spend, so don't bail out
|
||||
if (!txid_found_in_up)
|
||||
{
|
||||
LOG_PRINT_L2("Already seen " << txid << ", and not for us, skipped");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (!txid_found_in_up)
|
||||
{
|
||||
LOG_PRINT_L1("Found new pool tx: " << txid);
|
||||
bool found = false;
|
||||
for (const auto &i: m_unconfirmed_txs)
|
||||
{
|
||||
if (i.first == txid)
|
||||
{
|
||||
found = true;
|
||||
// if this is a payment to yourself at a different subaddress account, don't skip it
|
||||
// so that you can see the incoming pool tx with 'show_transfers' on that receiving subaddress account
|
||||
const unconfirmed_transfer_details& utd = i.second;
|
||||
for (const auto& dst : utd.m_dests)
|
||||
{
|
||||
auto subaddr_index = m_subaddresses.find(dst.addr.m_spend_public_key);
|
||||
if (subaddr_index != m_subaddresses.end() && subaddr_index->second.major != utd.m_subaddr_account)
|
||||
{
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found)
|
||||
{
|
||||
// not one of those we sent ourselves
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_PRINT_L1("We sent that one");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Code that is common to 'update_pool_state_by_pool_query' and 'update_pool_state_from_pool_data':
|
||||
// Process an unconfirmed transfer after we know whether it's in the pool or not
|
||||
void wallet2::process_unconfirmed_transfer(bool incremental, const crypto::hash &txid, wallet2::unconfirmed_transfer_details &tx_details, bool seen_in_pool, std::chrono::system_clock::time_point now, bool refreshed)
|
||||
{
|
||||
// TODO: set tx_propagation_timeout to CRYPTONOTE_DANDELIONPP_EMBARGO_AVERAGE * 3 / 2 after v15 hardfork
|
||||
constexpr const std::chrono::seconds tx_propagation_timeout{500};
|
||||
if (seen_in_pool)
|
||||
{
|
||||
if (tx_details.m_state != wallet2::unconfirmed_transfer_details::pending_in_pool)
|
||||
{
|
||||
tx_details.m_state = wallet2::unconfirmed_transfer_details::pending_in_pool;
|
||||
MINFO("Pending txid " << txid << " seen in pool, marking as pending in pool");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!incremental)
|
||||
{
|
||||
if (tx_details.m_state == wallet2::unconfirmed_transfer_details::pending_in_pool)
|
||||
{
|
||||
// For the probably unlikely case that a tx once seen in the pool vanishes
|
||||
// again set back to 'pending'
|
||||
tx_details.m_state = wallet2::unconfirmed_transfer_details::pending;
|
||||
MINFO("Already seen txid " << txid << " vanished from pool, marking as pending");
|
||||
}
|
||||
}
|
||||
// If a tx is pending for a "long time" without appearing in the pool, and if
|
||||
// we have refreshed and thus had a chance to really see it if it was there,
|
||||
// judge it as failed; the waiting for timeout and refresh happened avoids
|
||||
// false alarms with txs going to 'failed' too early
|
||||
if (tx_details.m_state == wallet2::unconfirmed_transfer_details::pending && refreshed &&
|
||||
now > std::chrono::system_clock::from_time_t(tx_details.m_sent_time) + tx_propagation_timeout)
|
||||
{
|
||||
LOG_PRINT_L1("Pending txid " << txid << " not in pool after " << tx_propagation_timeout.count() <<
|
||||
" seconds, marking as failed");
|
||||
tx_details.m_state = wallet2::unconfirmed_transfer_details::failed;
|
||||
|
||||
// the inputs aren't spent anymore, since the tx failed
|
||||
for (size_t vini = 0; vini < tx_details.m_tx.vin.size(); ++vini)
|
||||
{
|
||||
if (tx_details.m_tx.vin[vini].type() == typeid(txin_to_key))
|
||||
{
|
||||
txin_to_key &tx_in_to_key = boost::get<txin_to_key>(tx_details.m_tx.vin[vini]);
|
||||
for (size_t i = 0; i < m_transfers.size(); ++i)
|
||||
{
|
||||
const transfer_details &td = m_transfers[i];
|
||||
if (td.m_key_image == tx_in_to_key.k_image)
|
||||
{
|
||||
LOG_PRINT_L1("Resetting spent status for output " << vini << ": " << td.m_key_image);
|
||||
set_unspent(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// This public method is typically called to make sure that the wallet's pool state is up-to-date by
|
||||
// clients like simplewallet and the RPC daemon. Before incremental update this was the same method
|
||||
// that 'refresh' also used, but now it's more complicated because for the time being we support
|
||||
// the "old" and the "new" way of updating the pool and because only the 'getblocks' call supports
|
||||
// incremental update but we don't want any blocks here.
|
||||
//
|
||||
// simplewallet does NOT update the pool info during automatic refresh to avoid disturbing interactive
|
||||
// messages and prompts. When it finally calls this method here "to catch up" so to say we can't use
|
||||
// incremental update anymore, because with that we might miss some txs altogether.
|
||||
void wallet2::update_pool_state(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed, bool try_incremental)
|
||||
{
|
||||
bool updated = false;
|
||||
if (m_pool_info_query_time != 0 && try_incremental)
|
||||
{
|
||||
// We are connected to a daemon that supports giving back pool data with the 'getblocks' call,
|
||||
// thus use that, to get the chance to work incrementally and to keep working incrementally;
|
||||
// 'POOL_ONLY' was created to support this case
|
||||
cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::request req = AUTO_VAL_INIT(req);
|
||||
cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::response res = AUTO_VAL_INIT(res);
|
||||
|
||||
req.requested_info = COMMAND_RPC_GET_BLOCKS_FAST::POOL_ONLY;
|
||||
req.pool_info_since = m_pool_info_query_time;
|
||||
|
||||
{
|
||||
const boost::lock_guard<boost::recursive_mutex> lock{m_daemon_rpc_mutex};
|
||||
uint64_t pre_call_credits = m_rpc_payment_state.credits;
|
||||
req.client = get_client_signature();
|
||||
bool r = net_utils::invoke_http_bin("/getblocks.bin", req, res, *m_http_client, rpc_timeout);
|
||||
THROW_ON_RPC_RESPONSE_ERROR(r, {}, res, "getblocks.bin", error::get_blocks_error, get_rpc_status(res.status));
|
||||
uint64_t pool_info_cost = res.added_pool_txs.size() * COST_PER_TX + res.removed_pool_txids.size() * COST_PER_POOL_HASH;
|
||||
check_rpc_cost("/getblocks.bin", res.credits, pre_call_credits, pool_info_cost);
|
||||
}
|
||||
|
||||
m_pool_info_query_time = res.daemon_time;
|
||||
if (res.pool_info_extent != COMMAND_RPC_GET_BLOCKS_FAST::NONE)
|
||||
{
|
||||
update_pool_state_from_pool_data(res, process_txs, refreshed);
|
||||
updated = true;
|
||||
}
|
||||
// We SHOULD get pool data here, but if for some crazy reason we don't fall back to the "old" method
|
||||
}
|
||||
if (!updated)
|
||||
{
|
||||
update_pool_state_by_pool_query(process_txs, refreshed);
|
||||
}
|
||||
}
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// This is the "old" way of updating the pool with separate queries to get the pool content, used before
|
||||
// the 'getblocks' command was able to give back pool data in addition to blocks. Before this code was
|
||||
// the public 'update_pool_state' method. The logic is unchanged. This is a candidate for elimination
|
||||
// when it's sure that no more "old" daemons can be possibly around.
|
||||
void wallet2::update_pool_state_by_pool_query(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed)
|
||||
{
|
||||
MTRACE("update_pool_state_by_pool_query start");
|
||||
process_txs.clear();
|
||||
|
||||
auto keys_reencryptor = epee::misc_utils::create_scope_leave_handler([&, this]() {
|
||||
m_encrypt_keys_after_refresh.reset();
|
||||
@ -3353,16 +3553,15 @@ void wallet2::update_pool_state(std::vector<std::tuple<cryptonote::transaction,
|
||||
THROW_ON_RPC_RESPONSE_ERROR(r, {}, res, "get_transaction_pool_hashes.bin", error::get_tx_pool_error);
|
||||
check_rpc_cost("/get_transaction_pool_hashes.bin", res.credits, pre_call_credits, 1 + res.tx_hashes.size() * COST_PER_POOL_HASH);
|
||||
}
|
||||
MTRACE("update_pool_state got pool");
|
||||
MTRACE("update_pool_state_by_pool_query got pool");
|
||||
|
||||
// remove any pending tx that's not in the pool
|
||||
// TODO: set tx_propagation_timeout to CRYPTONOTE_DANDELIONPP_EMBARGO_AVERAGE * 3 / 2 after v15 hardfork
|
||||
constexpr const std::chrono::seconds tx_propagation_timeout{500};
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
std::unordered_map<crypto::hash, wallet2::unconfirmed_transfer_details>::iterator it = m_unconfirmed_txs.begin();
|
||||
while (it != m_unconfirmed_txs.end())
|
||||
{
|
||||
const crypto::hash &txid = it->first;
|
||||
MDEBUG("Checking m_unconfirmed_txs entry " << txid);
|
||||
bool found = false;
|
||||
for (const auto &it2: res.tx_hashes)
|
||||
{
|
||||
@ -3373,114 +3572,26 @@ void wallet2::update_pool_state(std::vector<std::tuple<cryptonote::transaction,
|
||||
}
|
||||
}
|
||||
auto pit = it++;
|
||||
if (!found)
|
||||
{
|
||||
// we want to avoid a false positive when we ask for the pool just after
|
||||
// a tx is removed from the pool due to being found in a new block, but
|
||||
// just before the block is visible by refresh. So we keep a boolean, so
|
||||
// that the first time we don't see the tx, we set that boolean, and only
|
||||
// delete it the second time it is checked (but only when refreshed, so
|
||||
// we're sure we've seen the blockchain state first)
|
||||
if (pit->second.m_state == wallet2::unconfirmed_transfer_details::pending)
|
||||
{
|
||||
LOG_PRINT_L1("Pending txid " << txid << " not in pool, marking as not in pool");
|
||||
pit->second.m_state = wallet2::unconfirmed_transfer_details::pending_not_in_pool;
|
||||
}
|
||||
else if (pit->second.m_state == wallet2::unconfirmed_transfer_details::pending_not_in_pool && refreshed &&
|
||||
now > std::chrono::system_clock::from_time_t(pit->second.m_sent_time) + tx_propagation_timeout)
|
||||
{
|
||||
LOG_PRINT_L1("Pending txid " << txid << " not in pool after " << tx_propagation_timeout.count() <<
|
||||
" seconds, marking as failed");
|
||||
pit->second.m_state = wallet2::unconfirmed_transfer_details::failed;
|
||||
|
||||
// the inputs aren't spent anymore, since the tx failed
|
||||
for (size_t vini = 0; vini < pit->second.m_tx.vin.size(); ++vini)
|
||||
{
|
||||
if (pit->second.m_tx.vin[vini].type() == typeid(txin_to_key))
|
||||
{
|
||||
txin_to_key &tx_in_to_key = boost::get<txin_to_key>(pit->second.m_tx.vin[vini]);
|
||||
for (size_t i = 0; i < m_transfers.size(); ++i)
|
||||
{
|
||||
const transfer_details &td = m_transfers[i];
|
||||
if (td.m_key_image == tx_in_to_key.k_image)
|
||||
{
|
||||
LOG_PRINT_L1("Resetting spent status for output " << vini << ": " << td.m_key_image);
|
||||
set_unspent(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
process_unconfirmed_transfer(false, txid, pit->second, found, now, refreshed);
|
||||
MDEBUG("New state of that entry: " << pit->second.m_state);
|
||||
}
|
||||
MTRACE("update_pool_state done first loop");
|
||||
MTRACE("update_pool_state_by_pool_query done first loop");
|
||||
|
||||
// remove pool txes to us that aren't in the pool anymore
|
||||
// but only if we just refreshed, so that the tx can go in
|
||||
// the in transfers list instead (or nowhere if it just
|
||||
// disappeared without being mined)
|
||||
if (refreshed)
|
||||
remove_obsolete_pool_txs(res.tx_hashes);
|
||||
remove_obsolete_pool_txs(res.tx_hashes, false);
|
||||
|
||||
MTRACE("update_pool_state done second loop");
|
||||
MTRACE("update_pool_state_by_pool_query done second loop");
|
||||
|
||||
// gather txids of new pool txes to us
|
||||
std::vector<std::pair<crypto::hash, bool>> txids;
|
||||
for (const auto &txid: res.tx_hashes)
|
||||
{
|
||||
bool txid_found_in_up = false;
|
||||
for (const auto &up: m_unconfirmed_payments)
|
||||
{
|
||||
if (up.second.m_pd.m_tx_hash == txid)
|
||||
{
|
||||
txid_found_in_up = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (m_scanned_pool_txs[0].find(txid) != m_scanned_pool_txs[0].end() || m_scanned_pool_txs[1].find(txid) != m_scanned_pool_txs[1].end())
|
||||
{
|
||||
// if it's for us, we want to keep track of whether we saw a double spend, so don't bail out
|
||||
if (!txid_found_in_up)
|
||||
{
|
||||
LOG_PRINT_L2("Already seen " << txid << ", and not for us, skipped");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (!txid_found_in_up)
|
||||
{
|
||||
LOG_PRINT_L1("Found new pool tx: " << txid);
|
||||
bool found = false;
|
||||
for (const auto &i: m_unconfirmed_txs)
|
||||
{
|
||||
if (i.first == txid)
|
||||
{
|
||||
found = true;
|
||||
// if this is a payment to yourself at a different subaddress account, don't skip it
|
||||
// so that you can see the incoming pool tx with 'show_transfers' on that receiving subaddress account
|
||||
const unconfirmed_transfer_details& utd = i.second;
|
||||
for (const auto& dst : utd.m_dests)
|
||||
{
|
||||
auto subaddr_index = m_subaddresses.find(dst.addr.m_spend_public_key);
|
||||
if (subaddr_index != m_subaddresses.end() && subaddr_index->second.major != utd.m_subaddr_account)
|
||||
{
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found)
|
||||
{
|
||||
// not one of those we sent ourselves
|
||||
txids.push_back({txid, false});
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_PRINT_L1("We sent that one");
|
||||
}
|
||||
}
|
||||
if (accept_pool_tx_for_processing(txid))
|
||||
txids.push_back({txid, false});
|
||||
}
|
||||
|
||||
// get_transaction_pool_hashes.bin may return more transactions than we're allowed to request in restricted mode
|
||||
@ -3555,11 +3666,91 @@ void wallet2::update_pool_state(std::vector<std::tuple<cryptonote::transaction,
|
||||
LOG_PRINT_L0("Error calling gettransactions daemon RPC: r " << r << ", status " << get_rpc_status(res.status));
|
||||
}
|
||||
}
|
||||
MTRACE("update_pool_state end");
|
||||
MTRACE("update_pool_state_by_pool_query end");
|
||||
}
//----------------------------------------------------------------------------------------------------
// Update pool state from pool data we got together with block data, either incremental data with
// txs that are new in the pool since the last time we queried and the ids of txs that were
// removed from the pool since then, or the whole content of the pool if incremental was not
// possible, e.g. because the server was just started or restarted.
void wallet2::update_pool_state_from_pool_data(const cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::response &res, std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed)
{
  MTRACE("update_pool_state_from_pool_data start");
  auto keys_reencryptor = epee::misc_utils::create_scope_leave_handler([&, this]() {
    m_encrypt_keys_after_refresh.reset();
  });

  bool incremental = res.pool_info_extent == COMMAND_RPC_GET_BLOCKS_FAST::INCREMENTAL;

  if (refreshed)
  {
    if (incremental)
    {
      // Delete from the list of unconfirmed payments what the daemon reported as tx that was removed from
      // pool; do so only after refresh to not delete too early and too eagerly; maybe we will find the tx
      // later in a block, or not, or find it again in the pool txs because it was first removed but then
      // somehow quickly "resurrected" - that all does not matter here, we retrace the removal
      remove_obsolete_pool_txs(res.removed_pool_txids, true);
    }
    else
    {
      // Delete from the list of unconfirmed payments what we don't find anymore in the pool; a bit
      // unfortunate that we have to build a new vector with ids first, but better than copying and
      // modifying the code of 'remove_obsolete_pool_txs' here
      std::vector<crypto::hash> txids;
      txids.reserve(res.added_pool_txs.size());
      for (const auto &it: res.added_pool_txs)
      {
        txids.push_back(it.tx_hash);
      }
      remove_obsolete_pool_txs(txids, false);
    }
  }

  // Possibly remove any pending tx that's not in the pool
  const auto now = std::chrono::system_clock::now();
  std::unordered_map<crypto::hash, wallet2::unconfirmed_transfer_details>::iterator it = m_unconfirmed_txs.begin();
  while (it != m_unconfirmed_txs.end())
  {
    const crypto::hash &txid = it->first;
    MDEBUG("Checking m_unconfirmed_txs entry " << txid);
    bool found = false;
    for (const auto &it2: res.added_pool_txs)
    {
      if (it2.tx_hash == txid)
      {
        found = true;
        break;
      }
    }
    auto pit = it++;
    process_unconfirmed_transfer(incremental, txid, pit->second, found, now, refreshed);
    MDEBUG("Resulting state of that entry: " << pit->second.m_state);
  }

  // Collect all pool txs that are "interesting" i.e. mostly those that we don't know about yet;
  // if we work incrementally and thus see only new pool txs since last time we asked it should
  // be rare that we know already about one of those, but check nevertheless
  process_txs.clear();
  for (const auto &pool_tx: res.added_pool_txs)
  {
    cryptonote::transaction tx;
    THROW_WALLET_EXCEPTION_IF(!cryptonote::parse_and_validate_tx_from_blob(pool_tx.tx_blob, tx),
        error::wallet_internal_error, "Failed to validate transaction from daemon");
    const crypto::hash &txid = pool_tx.tx_hash;
    bool take = accept_pool_tx_for_processing(txid);
    if (take)
    {
      process_txs.push_back(std::make_tuple(tx, txid, pool_tx.double_spend_seen));
    }
  }

  MTRACE("update_pool_state_from_pool_data end");
}
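// Editor's sketch (not part of this commit): the two shapes of pool data that
// update_pool_state_from_pool_data() distinguishes above. Only the INCREMENTAL constant
// and the added_pool_txs / removed_pool_txids fields are visible in this diff; the type
// and helper names below are stand-ins, not the actual RPC definitions.
#include <string>
#include <vector>

struct SketchPoolTx { std::string tx_hash; std::string tx_blob; bool double_spend_seen; };

struct SketchPoolData
{
  bool incremental;                             // res.pool_info_extent == INCREMENTAL
  std::vector<SketchPoolTx> added_pool_txs;     // new txs, or the whole pool if not incremental
  std::vector<std::string> removed_pool_txids;  // only meaningful for incremental replies
};

// Collect the tx hashes the wallet should drop from its unconfirmed-payments list,
// mirroring the remove_obsolete_pool_txs(..., remove_if_found) calls above.
std::vector<std::string> sketch_txids_to_drop(const SketchPoolData &pool,
                                              const std::vector<std::string> &our_pool_txids)
{
  std::vector<std::string> drop;
  for (const auto &txid : our_pool_txids)
  {
    bool listed = false;
    if (pool.incremental)
    {
      // incremental reply: drop exactly those of ours that the daemon names as removed
      for (const auto &removed : pool.removed_pool_txids)
        if (removed == txid) { listed = true; break; }
      if (listed)
        drop.push_back(txid);
    }
    else
    {
      // full snapshot: drop whatever of ours no longer appears in added_pool_txs
      for (const auto &tx : pool.added_pool_txs)
        if (tx.tx_hash == txid) { listed = true; break; }
      if (!listed)
        drop.push_back(txid);
    }
  }
  return drop;
}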
//----------------------------------------------------------------------------------------------------
void wallet2::process_pool_state(const std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &txs)
{
  MTRACE("process_pool_state start");
  const time_t now = time(NULL);
  for (const auto &e: txs)
  {
@ -3574,6 +3765,7 @@ void wallet2::process_pool_state(const std::vector<std::tuple<cryptonote::transa
      m_scanned_pool_txs[0].clear();
    }
  }
  MTRACE("process_pool_state end");
}
//----------------------------------------------------------------------------------------------------
void wallet2::fast_refresh(uint64_t stop_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, bool force)
@ -3694,7 +3886,7 @@ std::shared_ptr<std::map<std::pair<uint64_t, uint64_t>, size_t>> wallet2::create
  return cache;
}
//----------------------------------------------------------------------------------------------------
void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blocks_fetched, bool& received_money, bool check_pool)
void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blocks_fetched, bool& received_money, bool check_pool, bool try_incremental)
{
  if (m_offline)
  {
@ -3778,12 +3970,15 @@ void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blo

  auto scope_exit_handler_hwdev = epee::misc_utils::create_scope_leave_handler([&](){hwdev.computing_key_images(false);});

  m_process_pool_txs.clear();
  // Getting and processing the pool state has moved down into method 'pull_blocks' to
  // allow for "conventional" as well as "incremental" update. However the following
  // principle of getting all info first (pool AND blocks) and only process txs afterwards
  // still holds and is still respected:
  // get updated pool state first, but do not process those txes just yet,
  // since that might cause a password prompt, which would introduce a data
  // leak allowing a passive adversary with traffic analysis capability to
  // infer when we get an incoming output
  std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> process_pool_txs;
  update_pool_state(process_pool_txs, true);

  bool first = true, last = false;
  while(m_run.load(std::memory_order_relaxed))
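// Editor's note (not part of this commit): the ordering that the comment above describes,
// reduced to pseudocode. The body of pull_blocks() is outside this excerpt, so the exact
// point where it fetches pool data is an assumption; what the hunks here do show is that
// m_process_pool_txs is cleared before the download loop, that pull_and_parse_next_blocks()
// now receives 'first' and 'try_incremental', and that the collected pool txs are only
// handed to process_pool_state() once the loop has finished:
//
//   m_process_pool_txs.clear();
//   while (running)
//   {
//     pull_and_parse_next_blocks(first, try_incremental, ...); // first pass may also fetch
//                                                              // pool data into m_process_pool_txs
//     // ... process the parsed blocks ...
//     first = false;
//   }
//   if (check_pool && !m_process_pool_txs.empty())
//     process_pool_state(m_process_pool_txs);                  // pool txs processed only at the end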
@ -3804,11 +3999,10 @@ void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blo
      if (!first && blocks.empty())
      {
        m_node_rpc_proxy.set_height(m_blockchain.size());
        refreshed = true;
        break;
      }
      if (!last)
        tpool.submit(&waiter, [&]{pull_and_parse_next_blocks(start_height, next_blocks_start_height, short_chain_history, blocks, parsed_blocks, next_blocks, next_parsed_blocks, last, error, exception);});
        tpool.submit(&waiter, [&]{pull_and_parse_next_blocks(first, try_incremental, start_height, next_blocks_start_height, short_chain_history, blocks, parsed_blocks, next_blocks, next_parsed_blocks, last, error, exception);});

      if (!first)
      {
@ -3863,7 +4057,6 @@ void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blo
      if(!first && blocks_start_height == next_blocks_start_height)
      {
        m_node_rpc_proxy.set_height(m_blockchain.size());
        refreshed = true;
        break;
      }

@ -3930,8 +4123,8 @@ void wallet2::refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blo
  try
  {
    // If stop() is called we don't need to check pending transactions
    if (check_pool && m_run.load(std::memory_order_relaxed) && !process_pool_txs.empty())
      process_pool_state(process_pool_txs);
    if (check_pool && m_run.load(std::memory_order_relaxed) && !m_process_pool_txs.empty())
      process_pool_state(m_process_pool_txs);
  }
  catch (...)
  {
@ -10148,7 +10341,7 @@ void wallet2::light_wallet_get_address_txs()
    }
  }
  // TODO: purge old unconfirmed_txs
  remove_obsolete_pool_txs(pool_txs);
  remove_obsolete_pool_txs(pool_txs, false);

  // Calculate wallet balance
  m_light_wallet_balance = ires.total_received-wallet_total_sent;
@ -476,7 +476,7 @@ private:
      time_t m_sent_time;
      std::vector<cryptonote::tx_destination_entry> m_dests;
      crypto::hash m_payment_id;
      enum { pending, pending_not_in_pool, failed } m_state;
      enum { pending, pending_in_pool, failed } m_state;
      uint64_t m_timestamp;
      uint32_t m_subaddr_account; // subaddress account of your wallet to be used in this transfer
      std::set<uint32_t> m_subaddr_indices; // set of address indices used as inputs in this transfer
@ -1048,7 +1048,7 @@ private:
    bool is_deprecated() const;
    void refresh(bool trusted_daemon);
    void refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blocks_fetched);
    void refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blocks_fetched, bool& received_money, bool check_pool = true);
    void refresh(bool trusted_daemon, uint64_t start_height, uint64_t & blocks_fetched, bool& received_money, bool check_pool = true, bool try_incremental = true);
    bool refresh(bool trusted_daemon, uint64_t & blocks_fetched, bool& received_money, bool& ok);

    void set_refresh_type(RefreshType refresh_type) { m_refresh_type = refresh_type; }
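// Editor's sketch (not part of this commit): calling the extended overload declared above.
// Because try_incremental defaults to true, existing call sites keep compiling unchanged
// and pick up the incremental pool handling by default; a caller that wants to force the
// previous, non-incremental behaviour can pass false explicitly. 'wallet' is assumed to be
// an already initialised tools::wallet2, and wallet2.h is assumed to be included.
void sketch_refresh_forcing_old_pool_path(tools::wallet2 &wallet, bool trusted_daemon)
{
  uint64_t blocks_fetched = 0;
  bool received_money = false;
  wallet.refresh(trusted_daemon, 0 /*start_height*/, blocks_fetched, received_money,
                 true /*check_pool*/, false /*try_incremental*/);
}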
@ -1531,9 +1531,9 @@ private:
    bool import_key_images(signed_tx_set & signed_tx, size_t offset=0, bool only_selected_transfers=false);
    crypto::public_key get_tx_pub_key_from_received_outs(const tools::wallet2::transfer_details &td) const;

    void update_pool_state(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed = false);
    void update_pool_state(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed = false, bool try_incremental = false);
    void process_pool_state(const std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &txs);
    void remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashes);
    void remove_obsolete_pool_txs(const std::vector<crypto::hash> &tx_hashes, bool remove_if_found);

    std::string encrypt(const char *plaintext, size_t len, const crypto::secret_key &skey, bool authenticated = true) const;
    std::string encrypt(const epee::span<char> &span, const crypto::secret_key &skey, bool authenticated = true) const;
@ -1733,11 +1733,15 @@ private:
    void get_short_chain_history(std::list<crypto::hash>& ids, uint64_t granularity = 1) const;
    bool clear();
    void clear_soft(bool keep_key_images=false);
    void pull_blocks(uint64_t start_height, uint64_t& blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices> &o_indices, uint64_t &current_height);
    void pull_blocks(bool first, bool try_incremental, uint64_t start_height, uint64_t& blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices> &o_indices, uint64_t &current_height);
    void pull_hashes(uint64_t start_height, uint64_t& blocks_start_height, const std::list<crypto::hash> &short_chain_history, std::vector<crypto::hash> &hashes);
    void fast_refresh(uint64_t stop_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, bool force = false);
    void pull_and_parse_next_blocks(uint64_t start_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, const std::vector<cryptonote::block_complete_entry> &prev_blocks, const std::vector<parsed_block> &prev_parsed_blocks, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<parsed_block> &parsed_blocks, bool &last, bool &error, std::exception_ptr &exception);
    void pull_and_parse_next_blocks(bool first, bool try_incremental, uint64_t start_height, uint64_t &blocks_start_height, std::list<crypto::hash> &short_chain_history, const std::vector<cryptonote::block_complete_entry> &prev_blocks, const std::vector<parsed_block> &prev_parsed_blocks, std::vector<cryptonote::block_complete_entry> &blocks, std::vector<parsed_block> &parsed_blocks, bool &last, bool &error, std::exception_ptr &exception);
    void process_parsed_blocks(uint64_t start_height, const std::vector<cryptonote::block_complete_entry> &blocks, const std::vector<parsed_block> &parsed_blocks, uint64_t& blocks_added, std::map<std::pair<uint64_t, uint64_t>, size_t> *output_tracker_cache = NULL);
    bool accept_pool_tx_for_processing(const crypto::hash &txid);
    void process_unconfirmed_transfer(bool incremental, const crypto::hash &txid, wallet2::unconfirmed_transfer_details &tx_details, bool seen_in_pool, std::chrono::system_clock::time_point now, bool refreshed);
    void update_pool_state_by_pool_query(std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed = false);
    void update_pool_state_from_pool_data(const cryptonote::COMMAND_RPC_GET_BLOCKS_FAST::response &res, std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> &process_txs, bool refreshed);
    uint64_t select_transfers(uint64_t needed_money, std::vector<size_t> unused_transfers_indices, std::vector<size_t>& selected_transfers) const;
    bool prepare_file_names(const std::string& file_path);
    void process_unconfirmed(const crypto::hash &txid, const cryptonote::transaction& tx, uint64_t height);
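// Editor's note (not part of this commit), tying the declarations above to the hunks
// earlier in this diff: the old update_pool_state() body now lives in
// update_pool_state_by_pool_query() (the explicit pool-RPC path), while
// update_pool_state_from_pool_data() consumes pool data that arrives together with the
// block data of a getblocks.bin response inside the reworked pull_blocks(first,
// try_incremental, ...). update_pool_state() itself keeps its signature plus the new
// try_incremental flag; exactly how it chooses between the two paths is not visible in
// this excerpt.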
@ -1875,6 +1879,8 @@ private:
    // If m_refresh_from_block_height is explicitly set to zero we need this to differentiate it from the case that
    // m_refresh_from_block_height was defaulted to zero.*/
    bool m_explicit_refresh_from_block_height;
    uint64_t m_pool_info_query_time;
    std::vector<std::tuple<cryptonote::transaction, crypto::hash, bool>> m_process_pool_txs;
    uint64_t m_skip_to_height;
    // m_skip_to_height is useful when we don't want to modify the wallet's restore height.
    // m_refresh_from_block_height is also a wallet's restore height which should remain constant unless explicitly modified by the user.
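// Editor's note (not part of this commit): of the two members added here,
// m_process_pool_txs is the buffer that refresh() clears up front and hands to
// process_pool_state() at the end (see the wallet2.cpp hunks above); the role of
// m_pool_info_query_time is not shown in this excerpt, but judging by its name it
// presumably records when pool info was last requested, so that the next request can
// ask the daemon only for changes since that time.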