diff --git a/src/v/cluster/cluster_utils.cc b/src/v/cluster/cluster_utils.cc
index f0c205c3145d8..547e15849445f 100644
--- a/src/v/cluster/cluster_utils.cc
+++ b/src/v/cluster/cluster_utils.cc
@@ -401,18 +401,7 @@ std::optional check_result_configuration(
           new_configuration.id());
     }
     auto& current_configuration = it->second.broker;
-    /**
-     * do no allow to decrease node core count
-     */
-    if (
-      current_configuration.properties().cores
-      > new_configuration.properties().cores) {
-        return fmt::format(
-          "core count must not decrease on any broker, currently configured "
-          "core count: {}, requested core count: {}",
-          current_configuration.properties().cores,
-          new_configuration.properties().cores);
-    }
+
     /**
      * When cluster member configuration changes Redpanda by default doesn't
      * allow the change if a new cluster configuration would have two
diff --git a/src/v/cluster/scheduling/allocation_node.cc b/src/v/cluster/scheduling/allocation_node.cc
index 8b2ec285b77b3..4f3436fc0427d 100644
--- a/src/v/cluster/scheduling/allocation_node.cc
+++ b/src/v/cluster/scheduling/allocation_node.cc
@@ -142,15 +142,13 @@ void allocation_node::remove_final_count(partition_allocation_domain domain) {
 }
 
 void allocation_node::update_core_count(uint32_t core_count) {
-    vassert(
-      core_count >= cpus(),
-      "decreasing node core count is not supported, current core count {} > "
-      "requested core count {}",
-      cpus(),
-      core_count);
-    auto current_cpus = cpus();
-    for (auto i = current_cpus; i < core_count; ++i) {
-        _weights.push_back(0);
+    auto old_count = _weights.size();
+    if (core_count < old_count) {
+        _weights.resize(core_count);
+    } else {
+        for (auto i = old_count; i < core_count; ++i) {
+            _weights.push_back(0);
+        }
     }
     _max_capacity = allocation_capacity(
       (core_count * _partitions_per_shard()) - _partitions_reserve_shard0());
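
For context, the behavioural change can be illustrated with a minimal standalone sketch: previously the per-core weights vector could only grow (and `check_result_configuration` / the `vassert` rejected any decrease), whereas now a lower core count truncates the per-core weights and the maximum capacity is simply recomputed from the new count. The `node_sketch` type, its `weights` vector, and the per-shard constants below are illustrative stand-ins, not the real `allocation_node` API:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for allocation_node, showing only the
// grow-or-shrink core-count logic introduced by this diff.
struct node_sketch {
    // one weight (allocated partition count) per core, mirroring _weights
    std::vector<uint32_t> weights;
    uint32_t partitions_per_shard = 1000;    // assumed value for illustration
    uint32_t partitions_reserve_shard0 = 2;  // assumed value for illustration
    uint64_t max_capacity = 0;

    void update_core_count(uint32_t core_count) {
        auto old_count = weights.size();
        if (core_count < old_count) {
            // shrinking: drop the weights of the removed cores
            weights.resize(core_count);
        } else {
            // growing: new cores start with zero allocated partitions
            for (auto i = old_count; i < core_count; ++i) {
                weights.push_back(0);
            }
        }
        // capacity is always recomputed from the new core count
        max_capacity = static_cast<uint64_t>(core_count) * partitions_per_shard
                       - partitions_reserve_shard0;
    }
};

int main() {
    node_sketch n;
    n.update_core_count(8); // grow from 0 to 8 cores
    n.update_core_count(4); // now also allowed: shrink to 4 cores
    std::cout << n.weights.size() << " cores, capacity " << n.max_capacity
              << "\n";
}
```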