Mirror of https://codeberg.org/anoncontributorxmr/monero.git
Allow the number of incoming connections to be limited
It was already possible to limit outgoing connections. One might want to do this on home network connections with high bandwidth but low usage caps.
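The change mirrors the existing out-peers plumbing end to end: a new in_peers <max_number> console command, an --in-peers startup option defaulting to -1 (unlimited), and an /in_peers RPC endpoint that is only mapped when the RPC server is not restricted. The sketch below is illustrative only (the helper name connections_to_drop is not from this commit); it shows the arithmetic the new RPC handler uses to decide how many incoming connections to close once a lower cap is requested.

// Hypothetical helper, for illustration only: mirrors the computation in
// core_rpc_server::on_in_peers further down. Given the current number of
// incoming connections and the newly requested cap, it returns how many
// connections have to be closed to get under the cap.
#include <cstdint>
#include <iostream>

static std::uint64_t connections_to_drop(std::uint64_t n_connections, std::uint64_t new_limit)
{
  return (n_connections > new_limit) ? n_connections - new_limit : 0;
}

int main()
{
  std::cout << connections_to_drop(24, 8) << "\n"; // 16 incoming connections get closed
  std::cout << connections_to_drop(4, 8) << "\n";  // 0: already under the cap
}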
This commit is contained in:
parent d609a2c164
commit 32c0f908cd
@@ -77,6 +77,8 @@ class async_protocol_handler_config
  levin_commands_handler<t_connection_context>* m_pcommands_handler;
  void (*m_pcommands_handler_destroy)(levin_commands_handler<t_connection_context>*);

  void delete_connections (size_t count, bool incoming);

public:
  typedef t_connection_context connection_context;
  uint64_t m_max_packet_size;

@@ -101,6 +103,7 @@ public:
  {}
  ~async_protocol_handler_config() { set_handler(NULL, NULL); }
  void del_out_connections(size_t count);
  void del_in_connections(size_t count);
};

@@ -731,41 +734,50 @@ void async_protocol_handler_config<t_connection_context>::del_connection(async_p
}
//------------------------------------------------------------------------------------------
template<class t_connection_context>
void async_protocol_handler_config<t_connection_context>::delete_connections(size_t count, bool incoming)
{
  std::vector <boost::uuids::uuid> connections;
  CRITICAL_REGION_BEGIN(m_connects_lock);
  for (auto& c: m_connects)
  {
    if (c.second->m_connection_context.m_is_income == incoming)
      connections.push_back(c.first);
  }

  // close random connections from the provided set
  // TODO or better just keep removing random elements (performance)
  unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
  shuffle(connections.begin(), connections.end(), std::default_random_engine(seed));
  while (count > 0 && connections.size() > 0)
  {
    try
    {
      auto i = connections.end() - 1;
      async_protocol_handler<t_connection_context> *conn = m_connects.at(*i);
      del_connection(conn);
      close(*i);
      connections.erase(i);
    }
    catch (const std::out_of_range &e)
    {
      MWARNING("Connection not found in m_connects, continuing");
    }
    --count;
  }

  CRITICAL_REGION_END();
}
//------------------------------------------------------------------------------------------
template<class t_connection_context>
void async_protocol_handler_config<t_connection_context>::del_out_connections(size_t count)
{
  std::vector <boost::uuids::uuid> out_connections;
  CRITICAL_REGION_BEGIN(m_connects_lock);
  for (auto& c: m_connects)
  {
    if (!c.second->m_connection_context.m_is_income)
      out_connections.push_back(c.first);
  }

  if (out_connections.size() == 0)
    return;

  // close random out connections
  // TODO or better just keep removing random elements (performance)
  unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
  shuffle(out_connections.begin(), out_connections.end(), std::default_random_engine(seed));
  while (count > 0 && out_connections.size() > 0)
  {
    try
    {
      auto i = out_connections.end() - 1;
      async_protocol_handler<t_connection_context> *conn = m_connects.at(*i);
      del_connection(conn);
      close(*i);
      out_connections.erase(i);
    }
    catch (const std::out_of_range &e)
    {
      MWARNING("Connection not found in m_connects, continuing");
    }
    --count;
  }

  CRITICAL_REGION_END();
  delete_connections(count, false);
}
//------------------------------------------------------------------------------------------
template<class t_connection_context>
void async_protocol_handler_config<t_connection_context>::del_in_connections(size_t count)
{
  delete_connections(count, true);
}
//------------------------------------------------------------------------------------------
template<class t_connection_context>
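The delete_connections helper above picks victims by shuffling the full candidate list and then popping from the back. Its in-code TODO suggests an alternative that avoids the up-front shuffle; a minimal sketch of that idea (an assumption, not part of the commit) could look like this:

// Sketch of the TODO's alternative: pick a random index each iteration and
// swap-erase it, so no O(n) shuffle is needed when only a few connections
// must be closed. close_one() stands in for the del_connection()/close() pair.
#include <cstddef>
#include <random>
#include <vector>
#include <boost/uuid/uuid.hpp>

static void close_random(std::vector<boost::uuids::uuid>& candidates, std::size_t count,
                         void (*close_one)(const boost::uuids::uuid&))
{
  std::mt19937 rng{std::random_device{}()};
  while (count > 0 && !candidates.empty())
  {
    std::uniform_int_distribution<std::size_t> pick(0, candidates.size() - 1);
    const std::size_t i = pick(rng);
    close_one(candidates[i]);
    std::swap(candidates[i], candidates.back());
    candidates.pop_back();
    --count;
  }
}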
@@ -428,6 +428,23 @@ bool t_command_parser_executor::out_peers(const std::vector<std::string>& args)
  return m_executor.out_peers(limit);
}

bool t_command_parser_executor::in_peers(const std::vector<std::string>& args)
{
  if (args.empty()) return false;

  unsigned int limit;
  try {
    limit = std::stoi(args[0]);
  }

  catch(const std::exception& ex) {
    _erro("stoi exception");
    return false;
  }

  return m_executor.in_peers(limit);
}

bool t_command_parser_executor::start_save_graph(const std::vector<std::string>& args)
{
  if (!args.empty()) return false;

@@ -108,7 +108,9 @@ public:
  bool set_limit_down(const std::vector<std::string>& args);

  bool out_peers(const std::vector<std::string>& args);

  bool in_peers(const std::vector<std::string>& args);

  bool start_save_graph(const std::vector<std::string>& args);

  bool stop_save_graph(const std::vector<std::string>& args);

@@ -196,6 +196,12 @@ t_command_server::t_command_server(
    , "out_peers <max_number>"
    , "Set the <max_number> of out peers."
    );
  m_command_lookup.set_handler(
      "in_peers"
    , std::bind(&t_command_parser_executor::in_peers, &m_parser, p::_1)
    , "in_peers <max_number>"
    , "Set the <max_number> of in peers."
    );
  m_command_lookup.set_handler(
      "start_save_graph"
    , std::bind(&t_command_parser_executor::start_save_graph, &m_parser, p::_1)
@@ -1281,6 +1281,38 @@ bool t_rpc_command_executor::out_peers(uint64_t limit)
  return true;
}

bool t_rpc_command_executor::in_peers(uint64_t limit)
{
  cryptonote::COMMAND_RPC_IN_PEERS::request req;
  cryptonote::COMMAND_RPC_IN_PEERS::response res;

  epee::json_rpc::error error_resp;

  req.in_peers = limit;

  std::string fail_message = "Unsuccessful";

  if (m_is_rpc)
  {
    if (!m_rpc_client->json_rpc_request(req, res, "in_peers", fail_message.c_str()))
    {
      return true;
    }
  }
  else
  {
    if (!m_rpc_server->on_in_peers(req, res) || res.status != CORE_RPC_STATUS_OK)
    {
      tools::fail_msg_writer() << make_error(fail_message, res.status);
      return true;
    }
  }

  std::cout << "Max number of in peers set to " << limit << std::endl;

  return true;
}

bool t_rpc_command_executor::start_save_graph()
{
  cryptonote::COMMAND_RPC_START_SAVE_GRAPH::request req;
@@ -122,7 +122,9 @@ public:
  bool set_limit(int64_t limit_down, int64_t limit_up);

  bool out_peers(uint64_t limit);

  bool in_peers(uint64_t limit);

  bool start_save_graph();

  bool stop_save_graph();
@@ -55,6 +55,7 @@ namespace nodetool

  const command_line::arg_descriptor<bool> arg_no_igd = {"no-igd", "Disable UPnP port mapping"};
  const command_line::arg_descriptor<int64_t> arg_out_peers = {"out-peers", "set max number of out peers", -1};
  const command_line::arg_descriptor<int64_t> arg_in_peers = {"in-peers", "set max number of in peers", -1};
  const command_line::arg_descriptor<int> arg_tos_flag = {"tos-flag", "set TOS flag", -1};

  const command_line::arg_descriptor<int64_t> arg_limit_rate_up = {"limit-rate-up", "set limit-rate-up [kB/s]", -1};

@@ -81,6 +81,7 @@ namespace nodetool
    node_server(t_payload_net_handler& payload_handler)
      :m_payload_handler(payload_handler),
      m_current_number_of_out_peers(0),
      m_current_number_of_in_peers(0),
      m_allow_local_ip(false),
      m_hide_my_port(false),
      m_no_igd(false),

@@ -117,8 +118,10 @@ namespace nodetool
    bool log_connections();
    virtual uint64_t get_connections_count();
    size_t get_outgoing_connections_count();
    size_t get_incoming_connections_count();
    peerlist_manager& get_peerlist_manager(){return m_peerlist;}
    void delete_out_connections(size_t count);
    void delete_in_connections(size_t count);
    virtual bool block_host(const epee::net_utils::network_address &adress, time_t seconds = P2P_IP_BLOCKTIME);
    virtual bool unblock_host(const epee::net_utils::network_address &address);
    virtual std::map<std::string, time_t> get_blocked_hosts() { CRITICAL_REGION_LOCAL(m_blocked_hosts_lock); return m_blocked_hosts; }

@@ -230,6 +233,7 @@ namespace nodetool
    bool parse_peers_and_add_to_container(const boost::program_options::variables_map& vm, const command_line::arg_descriptor<std::vector<std::string> > & arg, Container& container);

    bool set_max_out_peers(const boost::program_options::variables_map& vm, int64_t max);
    bool set_max_in_peers(const boost::program_options::variables_map& vm, int64_t max);
    bool set_tos_flag(const boost::program_options::variables_map& vm, int limit);

    bool set_rate_up_limit(const boost::program_options::variables_map& vm, int64_t limit);

@@ -271,6 +275,7 @@ namespace nodetool
  public:
    config m_config; // TODO was private, add getters?
    std::atomic<unsigned int> m_current_number_of_out_peers;
    std::atomic<unsigned int> m_current_number_of_in_peers;

    void set_save_graph(bool save_graph)
    {

@@ -345,6 +350,7 @@ namespace nodetool
  extern const command_line::arg_descriptor<bool> arg_no_igd;
  extern const command_line::arg_descriptor<bool> arg_offline;
  extern const command_line::arg_descriptor<int64_t> arg_out_peers;
  extern const command_line::arg_descriptor<int64_t> arg_in_peers;
  extern const command_line::arg_descriptor<int> arg_tos_flag;

  extern const command_line::arg_descriptor<int64_t> arg_limit_rate_up;

@@ -85,6 +85,7 @@ namespace nodetool
    command_line::add_arg(desc, arg_p2p_hide_my_port);
    command_line::add_arg(desc, arg_no_igd);
    command_line::add_arg(desc, arg_out_peers);
    command_line::add_arg(desc, arg_in_peers);
    command_line::add_arg(desc, arg_tos_flag);
    command_line::add_arg(desc, arg_limit_rate_up);
    command_line::add_arg(desc, arg_limit_rate_down);

@@ -315,6 +316,9 @@ namespace nodetool
    if ( !set_max_out_peers(vm, command_line::get_arg(vm, arg_out_peers) ) )
      return false;

    if ( !set_max_in_peers(vm, command_line::get_arg(vm, arg_in_peers) ) )
      return false;

    if ( !set_tos_flag(vm, command_line::get_arg(vm, arg_tos_flag) ) )
      return false;

@@ -565,14 +569,23 @@ namespace nodetool
    while (!is_closing && !m_net_server.is_stop_signal_sent())
    { // main loop of thread
      //number_of_peers = m_net_server.get_config_object().get_connections_count();
      unsigned int number_of_peers = 0;
      unsigned int number_of_in_peers = 0;
      unsigned int number_of_out_peers = 0;
      m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
      {
        if (!cntxt.m_is_income) ++number_of_peers;
        if (cntxt.m_is_income)
        {
          ++number_of_in_peers;
        }
        else
        {
          ++number_of_out_peers;
        }
        return true;
      }); // lambda

      m_current_number_of_out_peers = number_of_peers;
      m_current_number_of_in_peers = number_of_in_peers;
      m_current_number_of_out_peers = number_of_out_peers;

      boost::this_thread::sleep_for(boost::chrono::seconds(1));
    } // main loop of thread

@@ -1253,6 +1266,20 @@ namespace nodetool
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  size_t node_server<t_payload_net_handler>::get_incoming_connections_count()
  {
    size_t count = 0;
    m_net_server.get_config_object().foreach_connection([&](const p2p_connection_context& cntxt)
    {
      if(cntxt.m_is_income)
        ++count;
      return true;
    });

    return count;
  }
  //-----------------------------------------------------------------------------------
  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::idle_worker()
  {
    m_peer_handshake_idle_maker_interval.do_call(boost::bind(&node_server<t_payload_net_handler>::peer_sync_idle_maker, this));

@@ -1618,6 +1645,13 @@ namespace nodetool
      return 1;
    }

    if (m_current_number_of_in_peers >= m_config.m_net_config.max_in_connection_count) // in peers limit
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but already have max incoming connections, so dropping this one.");
      drop_connection(context);
      return 1;
    }

    if(!m_payload_handler.process_payload_sync_data(arg.payload_data, context, true))
    {
      LOG_WARNING_CC(context, "COMMAND_HANDSHAKE came, but process_payload_sync_data returned false, dropping connection.");
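One detail worth noting about the check above: m_current_number_of_in_peers is an unsigned counter and max_in_connection_count is a uint32_t, so the default of -1 from --in-peers is stored as 0xFFFFFFFF and the >= comparison will in practice never fire until an explicit cap is set. A small standalone sketch of that behaviour (illustrative only, using plain variables rather than the node_server members):

#include <atomic>
#include <cstdint>
#include <iostream>

int main()
{
  std::atomic<unsigned int> current_in_peers{12};
  // default from --in-peers: -1 stored in a uint32_t means "unlimited"
  std::uint32_t max_in_connection_count = static_cast<std::uint32_t>(-1);

  std::cout << std::boolalpha
            << (current_in_peers >= max_in_connection_count) << "\n"; // false: never drops
  max_in_connection_count = 8; // e.g. after "in_peers 8"
  std::cout << (current_in_peers >= max_in_connection_count) << "\n"; // true: handshake dropped
}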
@@ -1786,12 +1820,29 @@ namespace nodetool
    return true;
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_max_in_peers(const boost::program_options::variables_map& vm, int64_t max)
  {
    if(max == -1) {
      m_config.m_net_config.max_in_connection_count = -1;
      return true;
    }
    m_config.m_net_config.max_in_connection_count = max;
    return true;
  }

  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::delete_out_connections(size_t count)
  {
    m_net_server.get_config_object().del_out_connections(count);
  }

  template<class t_payload_net_handler>
  void node_server<t_payload_net_handler>::delete_in_connections(size_t count)
  {
    m_net_server.get_config_object().del_in_connections(count);
  }

  template<class t_payload_net_handler>
  bool node_server<t_payload_net_handler>::set_tos_flag(const boost::program_options::variables_map& vm, int flag)
  {

@@ -132,12 +132,14 @@ namespace nodetool
  {
    BEGIN_KV_SERIALIZE_MAP()
      KV_SERIALIZE(max_out_connection_count)
      KV_SERIALIZE(max_in_connection_count)
      KV_SERIALIZE(handshake_interval)
      KV_SERIALIZE(packet_max_size)
      KV_SERIALIZE(config_id)
    END_KV_SERIALIZE_MAP()

    uint32_t max_out_connection_count;
    uint32_t max_in_connection_count;
    uint32_t connection_timeout;
    uint32_t ping_connection_timeout;
    uint32_t handshake_interval;
@@ -1603,6 +1603,18 @@ namespace cryptonote
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_in_peers(const COMMAND_RPC_IN_PEERS::request& req, COMMAND_RPC_IN_PEERS::response& res)
  {
    PERF_TIMER(on_in_peers);
    size_t n_connections = m_p2p.get_incoming_connections_count();
    size_t n_delete = (n_connections > req.in_peers) ? n_connections - req.in_peers : 0;
    m_p2p.m_config.m_net_config.max_in_connection_count = req.in_peers;
    if (n_delete)
      m_p2p.delete_in_connections(n_delete);
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_start_save_graph(const COMMAND_RPC_START_SAVE_GRAPH::request& req, COMMAND_RPC_START_SAVE_GRAPH::response& res)
  {
    PERF_TIMER(on_start_save_graph);
@@ -114,6 +114,7 @@ namespace cryptonote
    MAP_URI_AUTO_JON2("/get_limit", on_get_limit, COMMAND_RPC_GET_LIMIT)
    MAP_URI_AUTO_JON2_IF("/set_limit", on_set_limit, COMMAND_RPC_SET_LIMIT, !m_restricted)
    MAP_URI_AUTO_JON2_IF("/out_peers", on_out_peers, COMMAND_RPC_OUT_PEERS, !m_restricted)
    MAP_URI_AUTO_JON2_IF("/in_peers", on_in_peers, COMMAND_RPC_IN_PEERS, !m_restricted)
    MAP_URI_AUTO_JON2_IF("/start_save_graph", on_start_save_graph, COMMAND_RPC_START_SAVE_GRAPH, !m_restricted)
    MAP_URI_AUTO_JON2_IF("/stop_save_graph", on_stop_save_graph, COMMAND_RPC_STOP_SAVE_GRAPH, !m_restricted)
    MAP_URI_AUTO_JON2("/get_outs", on_get_outs, COMMAND_RPC_GET_OUTPUTS)

@@ -183,6 +184,7 @@ namespace cryptonote
    bool on_get_limit(const COMMAND_RPC_GET_LIMIT::request& req, COMMAND_RPC_GET_LIMIT::response& res);
    bool on_set_limit(const COMMAND_RPC_SET_LIMIT::request& req, COMMAND_RPC_SET_LIMIT::response& res);
    bool on_out_peers(const COMMAND_RPC_OUT_PEERS::request& req, COMMAND_RPC_OUT_PEERS::response& res);
    bool on_in_peers(const COMMAND_RPC_IN_PEERS::request& req, COMMAND_RPC_IN_PEERS::response& res);
    bool on_start_save_graph(const COMMAND_RPC_START_SAVE_GRAPH::request& req, COMMAND_RPC_START_SAVE_GRAPH::response& res);
    bool on_stop_save_graph(const COMMAND_RPC_STOP_SAVE_GRAPH::request& req, COMMAND_RPC_STOP_SAVE_GRAPH::response& res);
    bool on_update(const COMMAND_RPC_UPDATE::request& req, COMMAND_RPC_UPDATE::response& res);
@@ -1678,8 +1678,28 @@ namespace cryptonote

    struct response
    {
      std::string status;

      BEGIN_KV_SERIALIZE_MAP()
        KV_SERIALIZE(status)
      END_KV_SERIALIZE_MAP()
    };
  };

  struct COMMAND_RPC_IN_PEERS
  {
    struct request
    {
      uint64_t in_peers;
      BEGIN_KV_SERIALIZE_MAP()
        KV_SERIALIZE(in_peers)
      END_KV_SERIALIZE_MAP()
    };

    struct response
    {
      std::string status;

      BEGIN_KV_SERIALIZE_MAP()
        KV_SERIALIZE(status)
      END_KV_SERIALIZE_MAP()
    };
  };
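The KV_SERIALIZE entries above determine the JSON field names, so a client of the unrestricted /in_peers endpoint would send a body shaped like {"in_peers": 8} and receive {"status": "OK"} on success. The snippet below is an illustration only, using stand-in structs rather than the epee-serialized originals; the daemon-side caller is t_rpc_command_executor::in_peers shown earlier.

#include <cstdint>
#include <iostream>
#include <string>

// Stand-in shapes mirroring COMMAND_RPC_IN_PEERS::request / ::response.
struct in_peers_request  { std::uint64_t in_peers; };
struct in_peers_response { std::string status; };

int main()
{
  in_peers_request req{8}; // allow at most 8 incoming connections
  std::cout << "{\"in_peers\": " << req.in_peers << "}\n"; // what would go over the wire
}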