Patryk, please review this patch.
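
For reference, here is roughly the call pattern after the change (a minimal
sketch against the new signatures in locator/topology.hh; cfg stands in for
any valid locator::topology::config):

    auto id = locator::host_id::create_random_id();
    auto topo = locator::topology(cfg);
    // Nodes are keyed by host_id alone; no inet_address is passed or stored.
    topo.add_node(id, locator::endpoint_dc_rack::default_location,
                  locator::node::state::normal);
    // Updates DC/rack and state in place, or adds the node if it is missing.
    topo.add_or_update_endpoint(id, std::nullopt, locator::node::state::normal);
    // Lookup is by host_id only; the inet_address overload of find_node is gone.
    const locator::node* n = topo.find_node(id);
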
On 1/13/25 9:07 AM, 'Gleb Natapov' via ScyllaDB development wrote:
> Do not track the id-to-ip mapping in the topology class any longer.
> There are no remaining users.
> ---
>  locator/topology.hh                          | 27 +-----
>  locator/token_metadata.cc                    | 17 +---
>  locator/topology.cc                          | 97 +++-----------------
>  main.cc                                      |  3 +-
>  test/boost/locator_topology_test.cc          | 97 ++++----------------
>  test/boost/network_topology_strategy_test.cc | 30 ++----
>  test/boost/tablets_test.cc                   |  6 +-
>  test/lib/cql_test_env.cc                     |  3 +-
>  test/perf/perf_sort_by_proximity.cc          |  1 -
>  9 files changed, 52 insertions(+), 229 deletions(-)
>
> diff --git a/locator/topology.hh b/locator/topology.hh
> index f27c7428321..c87e8cc2790 100644
> --- a/locator/topology.hh
> +++ b/locator/topology.hh
> @@ -60,7 +60,6 @@ class node {
> private:
> const locator::topology* _topology;
> locator::host_id _host_id;
> - inet_address _endpoint;
> endpoint_dc_rack _dc_rack;
> state _state;
> shard_id _shard_count = 0;
> @@ -73,7 +72,6 @@ class node {
> public:
> node(const locator::topology* topology,
> locator::host_id id,
> - inet_address endpoint,
> endpoint_dc_rack dc_rack,
> state state,
> shard_id shard_count = 0,
> @@ -95,10 +93,6 @@ class node {
> return _host_id;
> }
>
> - const inet_address& endpoint() const noexcept {
> - return _endpoint;
> - }
> -
> const endpoint_dc_rack& dc_rack() const noexcept {
> return _dc_rack;
> }
> @@ -165,7 +159,6 @@ class node {
> private:
> static node_holder make(const locator::topology* topology,
> locator::host_id id,
> - inet_address endpoint,
> endpoint_dc_rack dc_rack,
> state state,
> shard_id shard_count = 0,
> @@ -212,7 +205,7 @@ class topology {
> }
>
> // Adds a node with given host_id, endpoint, and DC/rack.
> - const node& add_node(host_id id, const inet_address& ep, const endpoint_dc_rack& dr, node::state state,
> + const node& add_node(host_id id, const endpoint_dc_rack& dr, node::state state,
> shard_id shard_count = 0);
>
> // Optionally updates node's current host_id, endpoint, or DC/rack.
> @@ -220,7 +213,6 @@ class topology {
> // or a peer node host_id may be updated when the node is replaced with another node using the same ip address.
> void update_node(node& node,
> std::optional<host_id> opt_id,
> - std::optional<inet_address> opt_ep,
> std::optional<endpoint_dc_rack> opt_dr,
> std::optional<node::state> opt_st,
> std::optional<shard_id> opt_shard_count = std::nullopt);
> @@ -242,10 +234,6 @@ class topology {
> return *n;
> };
>
> - // Looks up a node by its inet_address.
> - // Returns a pointer to the node if found, or nullptr otherwise.
> - const node* find_node(const inet_address& ep) const noexcept;
> -
> // Finds a node by its index
> // Returns a pointer to the node if found, or nullptr otherwise.
> const node* find_node(node::idx_type idx) const noexcept;
> @@ -258,8 +246,7 @@ class topology {
> *
> * Adds or updates a node with given endpoint
> */
> - const node& add_or_update_endpoint(host_id id, std::optional<inet_address> opt_ep,
> - std::optional<endpoint_dc_rack> opt_dr = std::nullopt,
> + const node& add_or_update_endpoint(host_id id, std::optional<endpoint_dc_rack> opt_dr = std::nullopt,
> std::optional<node::state> opt_st = std::nullopt,
> std::optional<shard_id> shard_count = std::nullopt);
>
> @@ -412,7 +399,6 @@ class topology {
> const node* _this_node = nullptr;
> std::vector<node_holder> _nodes;
> std::unordered_map<host_id, std::reference_wrapper<const node>> _nodes_by_host_id;
> - std::unordered_map<inet_address, std::reference_wrapper<const node>> _nodes_by_endpoint;
>
> std::unordered_map<sstring, std::unordered_set<std::reference_wrapper<const node>>> _dc_nodes;
> std::unordered_map<sstring, std::unordered_map<sstring, std::unordered_set<std::reference_wrapper<const node>>>> _dc_rack_nodes;
> @@ -435,10 +421,6 @@ class topology {
>
> void calculate_datacenters();
>
> - const std::unordered_map<inet_address, std::reference_wrapper<const node>>& get_nodes_by_endpoint() const noexcept {
> - return _nodes_by_endpoint;
> - };
> -
> mutable random_engine_type _random_engine;
>
> friend class token_metadata_impl;
> @@ -492,12 +474,11 @@ struct fmt::formatter<locator::node> : fmt::formatter<string_view> {
> template <typename FormatContext>
> auto format(const locator::node& node, FormatContext& ctx) const {
> if (!verbose) {
> - return fmt::format_to(ctx.out(), "{}/{}", node.host_id(), node.endpoint());
> + return fmt::format_to(ctx.out(), "{}", node.host_id());
> } else {
> - return fmt::format_to(ctx.out(), " idx={} host_id={} endpoint={} dc={} rack={} state={} shards={} this_node={}",
> + return fmt::format_to(ctx.out(), " idx={} host_id={} dc={} rack={} state={} shards={} this_node={}",
> node.idx(),
> node.host_id(),
> - node.endpoint(),
> node.dc_rack().dc,
> node.dc_rack().rack,
> locator::node::to_string(node.get_state()),
> diff --git a/locator/token_metadata.cc b/locator/token_metadata.cc
> index 27b8e991285..519a6647549 100644
> --- a/locator/token_metadata.cc
> +++ b/locator/token_metadata.cc
> @@ -116,7 +116,7 @@ class token_metadata_impl final {
> }
>
> void update_topology(host_id id, std::optional<endpoint_dc_rack> opt_dr, std::optional<node::state> opt_st, std::optional<shard_id> shard_count = std::nullopt) {
> - _topology.add_or_update_endpoint(id, std::nullopt, std::move(opt_dr), std::move(opt_st), std::move(shard_count));
> + _topology.add_or_update_endpoint(id, std::move(opt_dr), std::move(opt_st), std::move(shard_count));
> }
>
> /**
> @@ -141,15 +141,6 @@ class token_metadata_impl final {
>
> void debug_show() const;
>
> - /**
> - * Store an end-point to host ID mapping. Each ID must be unique, and
> - * cannot be changed after the fact.
> - *
> - * @param hostId
> - * @param endpoint
> - */
> - void update_host_id(const host_id& host_id, inet_address endpoint);
> -
> /** @return a copy of host id set for read-only operations */
> std::unordered_set<host_id> get_host_ids() const;
>
> @@ -516,10 +507,6 @@ void token_metadata_impl::debug_show() const {
> reporter->arm_periodic(std::chrono::seconds(1));
> }
>
> -void token_metadata_impl::update_host_id(const host_id& host_id, inet_address endpoint) {
> - _topology.add_or_update_endpoint(host_id, endpoint);
> -}
> -
> std::unordered_set<host_id> token_metadata_impl::get_host_ids() const {
> return _topology.get_nodes() |
> std::views::filter([&] (const node& n) { return !n.left() && !n.is_none(); }) |
> @@ -964,7 +951,7 @@ token_metadata::debug_show() const {
>
> void
> token_metadata::update_host_id(const host_id& host_id, inet_address endpoint) {
> - _impl->update_host_id(host_id, endpoint);
> + // Do nothing for now. Remove later.
> }
>
> std::unordered_set<host_id>
> diff --git a/locator/topology.cc b/locator/topology.cc
> index 195c512a11a..8c03ac33b12 100644
> --- a/locator/topology.cc
> +++ b/locator/topology.cc
> @@ -54,10 +54,9 @@ thread_local const endpoint_dc_rack endpoint_dc_rack::default_location = {
> .rack = locator::production_snitch_base::default_rack,
> };
>
> -node::node(const locator::topology* topology, locator::host_id id, inet_address endpoint, endpoint_dc_rack dc_rack, state state, shard_id shard_count, this_node is_this_node, node::idx_type idx)
> +node::node(const locator::topology* topology, locator::host_id id, endpoint_dc_rack dc_rack, state state, shard_id shard_count, this_node is_this_node, node::idx_type idx)
> : _topology(topology)
> , _host_id(id)
> - , _endpoint(endpoint)
> , _dc_rack(std::move(dc_rack))
> , _state(state)
> , _shard_count(std::move(shard_count))
> @@ -65,12 +64,12 @@ node::node(const locator::topology* topology, locator::host_id id, inet_address
> , _idx(idx)
> {}
>
> -node_holder node::make(const locator::topology* topology, locator::host_id id, inet_address endpoint, endpoint_dc_rack dc_rack, state state, shard_id shard_count, node::this_node is_this_node, node::idx_type idx) {
> - return std::make_unique<node>(topology, std::move(id), std::move(endpoint), std::move(dc_rack), std::move(state), shard_count, is_this_node, idx);
> +node_holder node::make(const locator::topology* topology, locator::host_id id, endpoint_dc_rack dc_rack, state state, shard_id shard_count, node::this_node is_this_node, node::idx_type idx) {
> + return std::make_unique<node>(topology, std::move(id), std::move(dc_rack), std::move(state), shard_count, is_this_node, idx);
> }
>
> node_holder node::clone() const {
> - return make(nullptr, host_id(), endpoint(), dc_rack(), get_state(), get_shard_count(), is_this_node());
> + return make(nullptr, host_id(), dc_rack(), get_state(), get_shard_count(), is_this_node());
> }
>
> std::string node::to_string(node::state s) {
> @@ -94,7 +93,6 @@ future<> topology::clear_gently() noexcept {
> _datacenters.clear();
> _dc_rack_nodes.clear();
> _dc_nodes.clear();
> - _nodes_by_endpoint.clear();
> _nodes_by_host_id.clear();
> co_await utils::clear_gently(_nodes);
> }
> @@ -115,7 +113,7 @@ topology::topology(config cfg)
> {
> tlogger.trace("topology[{}]: constructing using config: endpoint={} id={} dc={} rack={}", fmt::ptr(this),
> cfg.this_endpoint, cfg.this_host_id, cfg.local_dc_rack.dc, cfg.local_dc_rack.rack);
> - add_node(cfg.this_host_id, cfg.this_endpoint, cfg.local_dc_rack, node::state::none);
> + add_node(cfg.this_host_id, cfg.local_dc_rack, node::state::none);
> }
>
> topology::topology(topology&& o) noexcept
> @@ -124,7 +122,6 @@ topology::topology(topology&& o) noexcept
> , _this_node(std::exchange(o._this_node, nullptr))
> , _nodes(std::move(o._nodes))
> , _nodes_by_host_id(std::move(o._nodes_by_host_id))
> - , _nodes_by_endpoint(std::move(o._nodes_by_endpoint))
> , _dc_nodes(std::move(o._dc_nodes))
> , _dc_rack_nodes(std::move(o._dc_rack_nodes))
> , _dc_endpoints(std::move(o._dc_endpoints))
> @@ -170,7 +167,7 @@ void topology::set_host_id_cfg(host_id this_host_id) {
> tlogger.trace("topology[{}]: set host id to {}", fmt::ptr(this), this_host_id);
>
> _cfg.this_host_id = this_host_id;
> - add_or_update_endpoint(this_host_id, _cfg.this_endpoint);
> + add_or_update_endpoint(this_host_id);
> }
>
> future<topology> topology::clone_gently() const {
> @@ -187,21 +184,15 @@ future<topology> topology::clone_gently() const {
> co_return ret;
> }
>
> -const node& topology::add_node(host_id id, const inet_address& ep, const endpoint_dc_rack& dr, node::state state, shard_id shard_count) {
> +const node& topology::add_node(host_id id, const endpoint_dc_rack& dr, node::state state, shard_id shard_count) {
> if (dr.dc.empty() || dr.rack.empty()) {
> on_internal_error(tlogger, "Node must have valid dc and rack");
> }
> - return add_node(node::make(this, id, ep, dr, state, shard_count));
> + return add_node(node::make(this, id, dr, state, shard_count));
> }
>
> bool topology::is_configured_this_node(const node& n) const {
> - if (_cfg.this_host_id && n.host_id()) { // Selection by host_id
> - return _cfg.this_host_id == n.host_id();
> - }
> - if (_cfg.this_endpoint != inet_address()) { // Selection by endpoint
> - return _cfg.this_endpoint == n.endpoint();
> - }
> - return false; // No selection;
> + return _cfg.this_host_id == n.host_id();
> }
>
> const node& topology::add_node(node_holder nptr) {
> @@ -244,10 +235,9 @@ const node& topology::add_node(node_holder nptr) {
> return *node;
> }
>
> -void topology::update_node(node& node, std::optional<host_id> opt_id, std::optional<inet_address> opt_ep, std::optional<endpoint_dc_rack> opt_dr, std::optional<node::state> opt_st, std::optional<shard_id> opt_shard_count) {
> - tlogger.debug("topology[{}]: update_node: {}: to: host_id={} endpoint={} dc={} rack={} state={} shard_count={}, at {}", fmt::ptr(this), node_printer(&node),
> +void topology::update_node(node& node, std::optional<host_id> opt_id, std::optional<endpoint_dc_rack> opt_dr, std::optional<node::state> opt_st, std::optional<shard_id> opt_shard_count) {
> + tlogger.debug("topology[{}]: update_node: {}: to: host_id={} dc={} rack={} state={} shard_count={}, at {}", fmt::ptr(this), node_printer(&node),
> opt_id ? format("{}", *opt_id) : "unchanged",
> - opt_ep ? format("{}", *opt_ep) : "unchanged",
> opt_dr ? format("{}", opt_dr->dc) : "unchanged",
> opt_dr ? format("{}", opt_dr->rack) : "unchanged",
> opt_st ? format("{}", *opt_st) : "unchanged",
> @@ -271,16 +261,6 @@ void topology::update_node(node& node, std::optional<host_id> opt_id, std::optio
> opt_id.reset();
> }
> }
> - if (opt_ep) {
> - if (*opt_ep != node.endpoint()) {
> - if (*opt_ep == inet_address{}) {
> - on_internal_error(tlogger, seastar::format("Updating node endpoint to null is disallowed: {}: new endpoint={}", node_printer(&node), *opt_ep));
> - }
> - changed = true;
> - } else {
> - opt_ep.reset();
> - }
> - }
> if (opt_dr) {
> if (opt_dr->dc.empty() || opt_dr->dc == production_snitch_base::default_dc) {
> opt_dr->dc = node.dc_rack().dc;
> @@ -311,9 +291,6 @@ void topology::update_node(node& node, std::optional<host_id> opt_id, std::optio
> if (opt_id) {
> node._host_id = *opt_id;
> }
> - if (opt_ep) {
> - node._endpoint = *opt_ep;
> - }
> if (opt_dr) {
> node._dc_rack = std::move(*opt_dr);
> }
> @@ -359,32 +336,6 @@ void topology::index_node(const node& node) {
> if (!inserted_host_id) {
> on_internal_error(tlogger, seastar::format("topology[{}]: {}: node already exists", fmt::ptr(this), node_printer(&node)));
> }
> - if (node.endpoint() != inet_address{}) {
> - auto eit = _nodes_by_endpoint.find(node.endpoint());
> - if (eit != _nodes_by_endpoint.end()) {
> - if (eit->second.get().get_state() == node::state::none && eit->second.get().is_this_node()) {
> - // eit->second is default entry created for local node and it is replaced by existing node with the same ip
> - // it means this node is going to replace the existing node with the same ip, but it does not know it yet
> - // map ip to the old node
> - _nodes_by_endpoint.erase(node.endpoint());
> - } else if (eit->second.get().get_state() == node::state::replacing && node.get_state() == node::state::being_replaced) {
> - // replace-with-same-ip, map ip to the old node
> - _nodes_by_endpoint.erase(node.endpoint());
> - } else if (eit->second.get().get_state() == node::state::being_replaced && node.get_state() == node::state::replacing) {
> - // replace-with-same-ip, map ip to the old node, do nothing if it's already the case
> - } else if (eit->second.get().is_leaving() || eit->second.get().left()) {
> - _nodes_by_endpoint.erase(node.endpoint());
> - } else if (!node.is_leaving() && !node.left()) {
> - if (node.host_id()) {
> - _nodes_by_host_id.erase(node.host_id());
> - }
> - on_internal_error(tlogger, seastar::format("topology[{}]: {}: node endpoint already mapped to {}", fmt::ptr(this), node_printer(&node), node_printer(&eit->second.get())));
> - }
> - }
> - if (!node.left() && !node.is_none()) {
> - _nodes_by_endpoint.try_emplace(node.endpoint(), std::cref(node));
> - }
> - }
>
> // We keep location of left nodes because they may still appear in tablet replica sets
> // and algorithms expect to know which dc they belonged to. View replica pairing needs stable
> @@ -441,10 +392,6 @@ void topology::unindex_node(const node& node) {
> if (host_it != _nodes_by_host_id.end() && host_it->second == node) {
> _nodes_by_host_id.erase(host_it);
> }
> - auto ep_it = _nodes_by_endpoint.find(node.endpoint());
> - if (ep_it != _nodes_by_endpoint.end() && ep_it->second.get() == node) {
> - _nodes_by_endpoint.erase(ep_it);
> - }
> if (_this_node == &node) {
> _this_node = nullptr;
> }
> @@ -483,16 +430,6 @@ node* topology::find_node(host_id id) noexcept {
> return make_mutable(const_cast<const topology*>(this)->find_node(id));
> }
>
> -// Finds a node by its endpoint
> -// Returns nullptr if not found
> -const node* topology::find_node(const inet_address& ep) const noexcept {
> - auto it = _nodes_by_endpoint.find(ep);
> - if (it != _nodes_by_endpoint.end()) {
> - return &it->second.get();
> - }
> - return nullptr;
> -}
> -
> // Finds a node by its index
> // Returns nullptr if not found
> const node* topology::find_node(node::idx_type idx) const noexcept {
> @@ -502,23 +439,19 @@ const node* topology::find_node(node::idx_type idx) const noexcept {
> return _nodes.at(idx).get();
> }
>
> -const node& topology::add_or_update_endpoint(host_id id, std::optional<inet_address> opt_ep, std::optional<endpoint_dc_rack> opt_dr, std::optional<node::state> opt_st, std::optional<shard_id> shard_count)
> +const node& topology::add_or_update_endpoint(host_id id, std::optional<endpoint_dc_rack> opt_dr, std::optional<node::state> opt_st, std::optional<shard_id> shard_count)
> {
> - tlogger.trace("topology[{}]: add_or_update_endpoint: host_id={} ep={} dc={} rack={} state={} shards={}, at {}", fmt::ptr(this),
> - id, opt_ep, opt_dr.value_or(endpoint_dc_rack{}).dc, opt_dr.value_or(endpoint_dc_rack{}).rack, opt_st.value_or(node::state::none), shard_count,
> + tlogger.trace("topology[{}]: add_or_update_endpoint: host_id={} dc={} rack={} state={} shards={}, at {}", fmt::ptr(this),
> + id, opt_dr.value_or(endpoint_dc_rack{}).dc, opt_dr.value_or(endpoint_dc_rack{}).rack, opt_st.value_or(node::state::none), shard_count,
> lazy_backtrace());
>
> auto* n = find_node(id);
> if (n) {
> - update_node(*n, std::nullopt, opt_ep, std::move(opt_dr), std::move(opt_st), std::move(shard_count));
> - return *n;
> - } else if (opt_ep && (n = make_mutable(find_node(*opt_ep)))) {
> - update_node(*n, id, std::nullopt, std::move(opt_dr), std::move(opt_st), std::move(shard_count));
> + update_node(*n, std::nullopt, std::move(opt_dr), std::move(opt_st), std::move(shard_count));
> return *n;
> }
>
> return add_node(id,
> - opt_ep.value_or(inet_address{}),
> opt_dr.value_or(endpoint_dc_rack::default_location),
> opt_st.value_or(node::state::none),
> shard_count.value_or(0));
> diff --git a/main.cc b/main.cc
> index 9d394363ca3..263a41ee160 100644
> --- a/main.cc
> +++ b/main.cc
> @@ -1437,12 +1437,11 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
> const auto listen_address = utils::resolve(cfg->listen_address, family).get();
> const auto host_id = initialize_local_info_thread(sys_ks, snitch, listen_address, *cfg, broadcast_addr, broadcast_rpc_addr);
>
> - shared_token_metadata::mutate_on_all_shards(token_metadata, [host_id, endpoint = broadcast_addr] (locator::token_metadata& tm) {
> + shared_token_metadata::mutate_on_all_shards(token_metadata, [host_id] (locator::token_metadata& tm) {
> // Makes local host id available in topology cfg as soon as possible.
> // Raft topology discard the endpoint-to-id map, so the local id can
> // still be found in the config.
> tm.get_topology().set_host_id_cfg(host_id);
> - tm.get_topology().add_or_update_endpoint(host_id, endpoint);
> return make_ready_future<>();
> }).get();
>
> diff --git a/test/boost/locator_topology_test.cc b/test/boost/locator_topology_test.cc
> index ccb321a11d7..f42dd743562 100644
> --- a/test/boost/locator_topology_test.cc
> +++ b/test/boost/locator_topology_test.cc
> @@ -32,9 +32,7 @@ SEASTAR_THREAD_TEST_CASE(test_add_node) {
> auto id1 = host_id::create_random_id();
> auto ep1 = gms::inet_address("127.0.0.1");
> auto id2 = host_id::create_random_id();
> - auto ep2 = gms::inet_address("127.0.0.2");
> auto id3 = host_id::create_random_id();
> - auto ep3 = gms::inet_address("127.0.0.3");
>
> topology::config cfg = {
> .this_endpoint = ep1,
> @@ -51,16 +49,13 @@ SEASTAR_THREAD_TEST_CASE(test_add_node) {
>
> std::unordered_set<std::reference_wrapper<const locator::node>> nodes;
>
> - nodes.insert(std::cref(topo.add_node(id2, ep2, endpoint_dc_rack::default_location, node::state::normal)));
> - nodes.insert(std::cref(topo.add_or_update_endpoint(id1, ep1, endpoint_dc_rack::default_location, node::state::normal)));
> + nodes.insert(std::cref(topo.add_node(id2, endpoint_dc_rack::default_location, node::state::normal)));
> + nodes.insert(std::cref(topo.add_or_update_endpoint(id1, endpoint_dc_rack::default_location, node::state::normal)));
>
> - BOOST_REQUIRE_THROW(topo.add_node(id1, ep2, endpoint_dc_rack::default_location, node::state::normal), std::runtime_error);
> - BOOST_REQUIRE_THROW(topo.add_node(id2, ep1, endpoint_dc_rack::default_location, node::state::normal), std::runtime_error);
> - BOOST_REQUIRE_THROW(topo.add_node(id2, ep2, endpoint_dc_rack::default_location, node::state::normal), std::runtime_error);
> - BOOST_REQUIRE_THROW(topo.add_node(id2, ep3, endpoint_dc_rack::default_location, node::state::normal), std::runtime_error);
> - BOOST_REQUIRE_THROW(topo.add_node(id3, ep3, endpoint_dc_rack{}, node::state::normal), std::runtime_error);
> + BOOST_REQUIRE_THROW(topo.add_node(id2, endpoint_dc_rack::default_location, node::state::normal), std::runtime_error);
> + BOOST_REQUIRE_THROW(topo.add_node(id3, endpoint_dc_rack{}, node::state::normal), std::runtime_error);
>
> - nodes.insert(std::cref(topo.add_node(id3, ep3, endpoint_dc_rack::default_location, node::state::normal)));
> + nodes.insert(std::cref(topo.add_node(id3, endpoint_dc_rack::default_location, node::state::normal)));
>
> topo.for_each_node([&] (const locator::node& node) {
> BOOST_REQUIRE(nodes.erase(std::cref(node)));
> @@ -82,7 +77,7 @@ SEASTAR_THREAD_TEST_CASE(test_moving) {
>
> auto topo = topology(cfg);
>
> - topo.add_or_update_endpoint(id1, ep1, endpoint_dc_rack::default_location, node::state::normal);
> + topo.add_or_update_endpoint(id1, endpoint_dc_rack::default_location, node::state::normal);
>
> BOOST_REQUIRE(topo.this_node()->topology() == &topo);
>
> @@ -101,8 +96,6 @@ SEASTAR_THREAD_TEST_CASE(test_update_node) {
> auto id1 = host_id::create_random_id();
> auto ep1 = gms::inet_address("127.0.0.1");
> auto id2 = host_id::create_random_id();
> - auto ep2 = gms::inet_address("127.0.0.2");
> - auto ep3 = gms::inet_address("127.0.0.3");
>
> topology::config cfg = {
> .this_endpoint = ep1,
> @@ -117,93 +110,42 @@ SEASTAR_THREAD_TEST_CASE(test_update_node) {
> set_abort_on_internal_error(true);
> });
>
> - topo.add_or_update_endpoint(id1, std::nullopt, endpoint_dc_rack::default_location, node::state::normal);
> + topo.add_or_update_endpoint(id1, endpoint_dc_rack::default_location, node::state::normal);
>
> auto node = const_cast<class node*>(topo.this_node());
>
> - topo.update_node(*node, std::nullopt, ep1, std::nullopt, std::nullopt);
> + topo.update_node(*node, std::nullopt, std::nullopt, std::nullopt);
>
> BOOST_REQUIRE_EQUAL(topo.find_node(id1), node);
>
> - BOOST_REQUIRE_THROW(topo.update_node(*node, host_id::create_null_id(), std::nullopt, std::nullopt, std::nullopt), std::runtime_error);
> - BOOST_REQUIRE_THROW(topo.update_node(*node, id2, std::nullopt, std::nullopt, std::nullopt), std::runtime_error);
> + BOOST_REQUIRE_THROW(topo.update_node(*node, host_id::create_null_id(), std::nullopt, std::nullopt), std::runtime_error);
> + BOOST_REQUIRE_THROW(topo.update_node(*node, id2, std::nullopt, std::nullopt), std::runtime_error);
> BOOST_REQUIRE_EQUAL(topo.find_node(id1), node);
> BOOST_REQUIRE_EQUAL(topo.find_node(id2), nullptr);
>
> - topo.update_node(*node, std::nullopt, ep2, std::nullopt, std::nullopt);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep1), nullptr);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep2), node);
> -
> auto dc_rack1 = endpoint_dc_rack{"DC1", "RACK1"};
> - topo.update_node(*node, std::nullopt, std::nullopt, dc_rack1, std::nullopt);
> + topo.update_node(*node, std::nullopt, dc_rack1, std::nullopt);
>
> BOOST_REQUIRE(topo.get_location(id1) == dc_rack1);
>
> auto dc_rack2 = endpoint_dc_rack{"DC2", "RACK2"};
> - topo.update_node(*node, std::nullopt, std::nullopt, dc_rack2, std::nullopt);
> + topo.update_node(*node, std::nullopt, dc_rack2, std::nullopt);
>
> BOOST_REQUIRE(topo.get_location(id1) == dc_rack2);
>
> BOOST_REQUIRE_NE(node->get_state(), locator::node::state::being_decommissioned);
> - topo.update_node(*node, std::nullopt, std::nullopt, std::nullopt, locator::node::state::being_decommissioned);
> + topo.update_node(*node, std::nullopt, std::nullopt, locator::node::state::being_decommissioned);
>
> BOOST_REQUIRE_EQUAL(node->get_state(), locator::node::state::being_decommissioned);
>
> auto dc_rack3 = endpoint_dc_rack{"DC3", "RACK3"};
> // Note: engage state option, but keep node::state value the same
> // to reproduce #13502
> - topo.update_node(*node, std::nullopt, ep3, dc_rack3, locator::node::state::being_decommissioned);
> + topo.update_node(*node, std::nullopt, dc_rack3, locator::node::state::being_decommissioned);
>
> BOOST_REQUIRE_EQUAL(topo.find_node(id1), node);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep1), nullptr);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep2), nullptr);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep3), node);
> BOOST_REQUIRE(topo.get_location(id1) == dc_rack3);
> BOOST_REQUIRE_EQUAL(node->get_state(), locator::node::state::being_decommissioned);
> -
> - // In state::left the node will remain indexed only by its host_id
> - topo.update_node(*node, std::nullopt, std::nullopt, std::nullopt, locator::node::state::left);
> - BOOST_REQUIRE_EQUAL(topo.find_node(id1), node);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep1), nullptr);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep2), nullptr);
> - BOOST_REQUIRE_EQUAL(topo.find_node(ep3), nullptr);
> - BOOST_REQUIRE(topo.get_location(id1) == dc_rack3);
> - BOOST_REQUIRE_EQUAL(node->get_state(), locator::node::state::left);
> -}
> -
> -SEASTAR_THREAD_TEST_CASE(test_add_or_update_by_host_id) {
> - auto id1 = host_id::create_random_id();
> - auto id2 = host_id::create_random_id();
> - auto ep1 = gms::inet_address("127.0.0.1");
> -
> - // In this test we check that add_or_update_endpoint searches by host_id first.
> - // We create two nodes, one matches by id, another - by ip,
> - // and SCYLLA_ASSERT that add_or_update_endpoint updates the first.
> - // We need to make the second node 'being_decommissioned', so that
> - // it gets removed from ip index and we don't get the non-unique IP error.
> -
> - auto topo = topology({
> - .this_host_id = id1,
> - .local_dc_rack = endpoint_dc_rack::default_location,
> - });
> -
> - topo.add_or_update_endpoint(id1, gms::inet_address{}, endpoint_dc_rack::default_location, node::state::normal);
> - topo.add_node(id2, ep1, endpoint_dc_rack::default_location, node::state::being_decommissioned);
> -
> - topo.add_or_update_endpoint(id1, ep1, std::nullopt, node::state::bootstrapping);
> -
> - auto* n = topo.find_node(id1);
> - BOOST_REQUIRE_EQUAL(n->get_state(), node::state::bootstrapping);
> - BOOST_REQUIRE_EQUAL(n->host_id(), id1);
> - BOOST_REQUIRE_EQUAL(n->endpoint(), ep1);
> -
> - auto* n2 = topo.find_node(ep1);
> - BOOST_REQUIRE_EQUAL(n, n2);
> -
> - auto* n3 = topo.find_node(id2);
> - BOOST_REQUIRE_EQUAL(n3->get_state(), node::state::being_decommissioned);
> - BOOST_REQUIRE_EQUAL(n3->host_id(), id2);
> - BOOST_REQUIRE_EQUAL(n3->endpoint(), ep1);
> }
>
> SEASTAR_THREAD_TEST_CASE(test_remove_endpoint) {
> @@ -214,7 +156,6 @@ SEASTAR_THREAD_TEST_CASE(test_remove_endpoint) {
> const auto id1 = host_id::create_random_id();
> const auto ep1 = gms::inet_address("127.0.0.1");
> const auto id2 = host_id::create_random_id();
> - const auto ep2 = gms::inet_address("127.0.0.2");
> const auto dc_rack1 = endpoint_dc_rack {
> .dc = "dc1",
> .rack = "rack1"
> @@ -232,8 +173,8 @@ SEASTAR_THREAD_TEST_CASE(test_remove_endpoint) {
>
> auto topo = topology(cfg);
>
> - topo.add_or_update_endpoint(id1, ep1, dc_rack1, node::state::normal);
> - topo.add_node(id2, ep2, dc_rack2, node::state::normal);
> + topo.add_or_update_endpoint(id1, dc_rack1, node::state::normal);
> + topo.add_node(id2, dc_rack2, node::state::normal);
>
> BOOST_REQUIRE_EQUAL(topo.get_datacenter_endpoints(), (dc_endpoints_t{{"dc1", {id1, id2}}}));
> BOOST_REQUIRE_EQUAL(topo.get_datacenter_racks(), (dc_racks_t{{"dc1", {{"rack1", {id1}}, {"rack2", {id2}}}}}));
> @@ -374,9 +315,7 @@ SEASTAR_THREAD_TEST_CASE(test_left_node_is_kept_outside_dc) {
> auto id1 = host_id::create_random_id();
> auto ep1 = gms::inet_address("127.0.0.1");
> auto id2 = host_id::create_random_id();
> - auto ep2 = gms::inet_address("127.0.0.2");
> auto id3 = host_id::create_random_id();
> - auto ep3 = gms::inet_address("127.0.0.3");
>
> const auto dc_rack1 = endpoint_dc_rack {
> .dc = "dc1",
> @@ -397,8 +336,8 @@ SEASTAR_THREAD_TEST_CASE(test_left_node_is_kept_outside_dc) {
>
> std::unordered_set<std::reference_wrapper<const locator::node>> nodes;
>
> - nodes.insert(std::cref(topo.add_node(id2, ep2, dc_rack1, node::state::normal)));
> - nodes.insert(std::cref(topo.add_node(id3, ep3, dc_rack1, node::state::left)));
> + nodes.insert(std::cref(topo.add_node(id2, dc_rack1, node::state::normal)));
> + nodes.insert(std::cref(topo.add_node(id3, dc_rack1, node::state::left)));
>
> topo.for_each_node([&] (const locator::node& node) {
> BOOST_REQUIRE(node.host_id() != id3);
> diff --git a/test/boost/network_topology_strategy_test.cc b/test/boost/network_topology_strategy_test.cc
> index 142e0e4f598..a175df290f7 100644
> --- a/test/boost/network_topology_strategy_test.cc
> +++ b/test/boost/network_topology_strategy_test.cc
> @@ -303,7 +303,7 @@ void simple_test() {
> for (const auto& [ring_point, endpoint, id] : ring_points) {
> std::unordered_set<token> tokens;
> tokens.insert(token{tests::d2t(ring_point / ring_points.size())});
> - topo.add_node(id, endpoint, make_endpoint_dc_rack(endpoint), locator::node::state::normal);
> + topo.add_node(id, make_endpoint_dc_rack(endpoint), locator::node::state::normal);
> co_await tm.update_normal_tokens(std::move(tokens), id);
> }
> }).get();
> @@ -411,7 +411,7 @@ void heavy_origin_test() {
> stm.mutate_token_metadata([&] (token_metadata& tm) -> future<> {
> auto& topo = tm.get_topology();
> for (const auto& [ring_point, endpoint, id] : ring_points) {
> - topo.add_node(id, endpoint, make_endpoint_dc_rack(endpoint), locator::node::state::normal);
> + topo.add_node(id, make_endpoint_dc_rack(endpoint), locator::node::state::normal);
> co_await tm.update_normal_tokens(tokens[endpoint], id);
> }
> }).get();
> @@ -483,7 +483,7 @@ SEASTAR_THREAD_TEST_CASE(NetworkTopologyStrategy_tablets_test) {
> for (const auto& [ring_point, endpoint, id] : ring_points) {
> std::unordered_set<token> tokens;
> tokens.insert(token{tests::d2t(ring_point / ring_points.size())});
> - topo.add_node(id, endpoint, make_endpoint_dc_rack(endpoint), locator::node::state::normal, shard_count);
> + topo.add_node(id, make_endpoint_dc_rack(endpoint), locator::node::state::normal, shard_count);
> tm.update_host_id(id, endpoint);
> co_await tm.update_normal_tokens(std::move(tokens), id);
> }
> @@ -575,7 +575,7 @@ static void test_random_balancing(sharded<snitch_ptr>& snitch, gms::inet_address
> for (const auto& [ring_point, endpoint, id] : ring_points) {
> std::unordered_set<token> tokens;
> tokens.insert(token{tests::d2t(ring_point / ring_points.size())});
> - topo.add_node(id, endpoint, make_endpoint_dc_rack(endpoint), locator::node::state::normal, shard_count);
> + topo.add_node(id, make_endpoint_dc_rack(endpoint), locator::node::state::normal, shard_count);
> tm.update_host_id(id, endpoint);
> co_await tm.update_normal_tokens(std::move(tokens), id);
> }
> @@ -864,12 +864,11 @@ void generate_topology(topology& topo, const std::unordered_map<sstring, size_t>
> out = std::fill_n(out, rf, std::cref(dc));
> }
>
> - unsigned i = 0;
> for (auto& node : nodes) {
> const sstring& dc = dcs[udist(0, dcs.size() - 1)(e1)];
> auto rc = racks_per_dc.at(dc);
> auto r = udist(0, rc)(e1);
> - topo.add_or_update_endpoint(node, inet_address((127u << 24) | ++i), endpoint_dc_rack{dc, to_sstring(r)}, locator::node::state::normal);
> + topo.add_or_update_endpoint(node, endpoint_dc_rack{dc, to_sstring(r)}, locator::node::state::normal);
> }
> }
>
> @@ -1144,21 +1143,15 @@ SEASTAR_THREAD_TEST_CASE(test_topology_tracks_local_node) {
>
> const node* n1 = stm.get()->get_topology().find_node(host1);
> BOOST_REQUIRE(n1);
> - n1 = stm.get()->get_topology().find_node(ip1);
> - BOOST_REQUIRE(n1);
> BOOST_REQUIRE(bool(n1->is_this_node()));
> BOOST_REQUIRE_EQUAL(n1->host_id(), host1);
> - BOOST_REQUIRE_EQUAL(n1->endpoint(), ip1);
> BOOST_REQUIRE(n1->dc_rack() == ip1_dc_rack);
> BOOST_REQUIRE(stm.get()->get_topology().get_location() == ip1_dc_rack);
>
> const node* n2 = stm.get()->get_topology().find_node(host2);
> BOOST_REQUIRE(n2);
> - n2 = stm.get()->get_topology().find_node(ip2);
> - BOOST_REQUIRE(n2);
> BOOST_REQUIRE(!bool(n2->is_this_node()));
> BOOST_REQUIRE_EQUAL(n2->host_id(), host2);
> - BOOST_REQUIRE_EQUAL(n2->endpoint(), ip2);
> BOOST_REQUIRE(n2->dc_rack() == endpoint_dc_rack::default_location);
>
> // Local node cannot be removed
> @@ -1171,8 +1164,6 @@ SEASTAR_THREAD_TEST_CASE(test_topology_tracks_local_node) {
>
> n1 = stm.get()->get_topology().find_node(host1);
> BOOST_REQUIRE(n1);
> - n1 = stm.get()->get_topology().find_node(ip1);
> - BOOST_REQUIRE(n1);
>
> // Removing node with no local node
>
> @@ -1183,22 +1174,19 @@ SEASTAR_THREAD_TEST_CASE(test_topology_tracks_local_node) {
>
> n2 = stm.get()->get_topology().find_node(host2);
> BOOST_REQUIRE(!n2);
> - n2 = stm.get()->get_topology().find_node(ip2);
> - BOOST_REQUIRE(!n2);
>
> // Repopulate after clear_gently()
>
> stm.mutate_token_metadata([&] (token_metadata& tm) -> future<> {
> co_await tm.clear_gently();
> - tm.update_host_id(host2, ip2);
> - tm.update_host_id(host1, ip1); // this_node added last on purpose
> + tm.update_topology(host2, std::nullopt, std::nullopt);
> + tm.update_topology(host1, std::nullopt, std::nullopt); // this_node added last on purpose
> }).get();
>
> n1 = stm.get()->get_topology().find_node(host1);
> BOOST_REQUIRE(n1);
> BOOST_REQUIRE(bool(n1->is_this_node()));
> BOOST_REQUIRE_EQUAL(n1->host_id(), host1);
> - BOOST_REQUIRE_EQUAL(n1->endpoint(), ip1);
> BOOST_REQUIRE(n1->dc_rack() == ip1_dc_rack);
> BOOST_REQUIRE(stm.get()->get_topology().get_location() == ip1_dc_rack);
>
> @@ -1206,21 +1194,19 @@ SEASTAR_THREAD_TEST_CASE(test_topology_tracks_local_node) {
> BOOST_REQUIRE(n2);
> BOOST_REQUIRE(!bool(n2->is_this_node()));
> BOOST_REQUIRE_EQUAL(n2->host_id(), host2);
> - BOOST_REQUIRE_EQUAL(n2->endpoint(), ip2);
> BOOST_REQUIRE(n2->dc_rack() == endpoint_dc_rack::default_location);
>
> // get_location() should pick up endpoint_dc_rack from node info
>
> stm.mutate_token_metadata([&] (token_metadata& tm) -> future<> {
> co_await tm.clear_gently();
> - tm.get_topology().add_or_update_endpoint(host1, ip1, ip1_dc_rack_v2, node::state::being_decommissioned);
> + tm.get_topology().add_or_update_endpoint(host1, ip1_dc_rack_v2, node::state::being_decommissioned);
> }).get();
>
> n1 = stm.get()->get_topology().find_node(host1);
> BOOST_REQUIRE(n1);
> BOOST_REQUIRE(bool(n1->is_this_node()));
> BOOST_REQUIRE_EQUAL(n1->host_id(), host1);
> - BOOST_REQUIRE_EQUAL(n1->endpoint(), ip1);
> BOOST_REQUIRE(n1->dc_rack() == ip1_dc_rack_v2);
> BOOST_REQUIRE(stm.get()->get_topology().get_location() == ip1_dc_rack_v2);
> }
> diff --git a/test/boost/tablets_test.cc b/test/boost/tablets_test.cc
> index 07ceaf04cac..e3e1d7c4d61 100644
> --- a/test/boost/tablets_test.cc
> +++ b/test/boost/tablets_test.cc
> @@ -1019,7 +1019,7 @@ SEASTAR_TEST_CASE(test_sharder) {
> auto table1 = table_id(utils::UUID_gen::get_time_UUID());
>
> token_metadata tokm(token_metadata::config{ .topo_cfg{ .this_host_id = h1, .local_dc_rack = locator::endpoint_dc_rack::default_location } });
> - tokm.get_topology().add_or_update_endpoint(h1, tokm.get_topology().my_address());
> + tokm.get_topology().add_or_update_endpoint(h1);
>
> std::vector<tablet_id> tablet_ids;
> {
> @@ -1234,7 +1234,7 @@ SEASTAR_TEST_CASE(test_intranode_sharding) {
> auto table1 = table_id(utils::UUID_gen::get_time_UUID());
>
> token_metadata tokm(token_metadata::config{ .topo_cfg{ .this_host_id = h1, .local_dc_rack = locator::endpoint_dc_rack::default_location } });
> - tokm.get_topology().add_or_update_endpoint(h1, tokm.get_topology().my_address());
> + tokm.get_topology().add_or_update_endpoint(h1);
>
> auto leaving_replica = tablet_replica{h1, 5};
> auto pending_replica = tablet_replica{h1, 7};
> @@ -3339,7 +3339,7 @@ static void execute_tablet_for_new_rf_test(calculate_tablet_replicas_for_new_rf_
> for (const auto& [ring_point, endpoint, id] : test_config.ring_points) {
> std::unordered_set<token> tokens;
> tokens.insert(dht::token{tests::d2t(ring_point / test_config.ring_points.size())});
> - topo.add_or_update_endpoint(id, endpoint, make_endpoint_dc_rack(endpoint), locator::node::state::normal, 1);
> + topo.add_or_update_endpoint(id, make_endpoint_dc_rack(endpoint), locator::node::state::normal, 1);
> tm.update_host_id(id, endpoint);
> co_await tm.update_normal_tokens(std::move(tokens), id);
> }
> diff --git a/test/lib/cql_test_env.cc b/test/lib/cql_test_env.cc
> index b73cf303438..f44423bc1e2 100644
> --- a/test/lib/cql_test_env.cc
> +++ b/test/lib/cql_test_env.cc
> @@ -685,11 +685,10 @@ class single_node_cql_env : public cql_test_env {
> host_id = linfo.host_id;
> _sys_ks.local().save_local_info(std::move(linfo), _snitch.local()->get_location(), my_address, my_address).get();
> }
> - locator::shared_token_metadata::mutate_on_all_shards(_token_metadata, [hostid = host_id, &cfg_in] (locator::token_metadata& tm) {
> + locator::shared_token_metadata::mutate_on_all_shards(_token_metadata, [hostid = host_id] (locator::token_metadata& tm) {
> auto& topo = tm.get_topology();
> topo.set_host_id_cfg(hostid);
> topo.add_or_update_endpoint(hostid,
> - cfg_in.broadcast_address,
> std::nullopt,
> locator::node::state::normal,
> smp::count);
> diff --git a/test/perf/perf_sort_by_proximity.cc b/test/perf/perf_sort_by_proximity.cc
> index 4d86bcea1df..438bf19ebd2 100644
> --- a/test/perf/perf_sort_by_proximity.cc
> +++ b/test/perf/perf_sort_by_proximity.cc
> @@ -53,7 +53,6 @@ struct sort_by_proximity_topology {
> auto id = locator::host_id{utils::UUID(0, i)};
> nodes[dc][rack].emplace_back(id);
> topology.add_or_update_endpoint(id,
> - gms::inet_address((127u << 24) | i),
> locator::endpoint_dc_rack{format("dc{}", dc), format("rack{}", rack)},
> locator::node::state::normal);
> }