Skip to content

Commit

Permalink
fix(backend): 资源池协议改造体验问题修复 #7747
Browse files Browse the repository at this point in the history
  • Loading branch information
iSecloud committed Feb 14, 2025
1 parent 6082d72 commit e28a318
Show file tree
Hide file tree
Showing 24 changed files with 93 additions and 74 deletions.
14 changes: 12 additions & 2 deletions dbm-ui/backend/db_dirty/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,12 @@ def transfer_hosts_to_pool(self, request):
responses={status.HTTP_200_OK: ListMachineEventResponseSerializer()},
tags=[SWAGGER_TAG],
)
@action(detail=False, methods=["GET"], filter_class=MachineEventFilter, queryset=MachineEvent.objects.all())
@action(
detail=False,
methods=["GET"],
filter_class=MachineEventFilter,
queryset=MachineEvent.objects.all().order_by("-update_at"),
)
def list_machine_events(self, request):
events_qs = self.paginate_queryset(self.filter_queryset(self.get_queryset()))
events_data = ListMachineEventSerializer(events_qs, many=True).data
Expand All @@ -67,7 +72,12 @@ def list_machine_events(self, request):
responses={status.HTTP_200_OK: ListMachinePoolResponseSerializer()},
tags=[SWAGGER_TAG],
)
@action(detail=False, methods=["GET"], filter_class=DirtyMachinePoolFilter, queryset=DirtyMachine.objects.all())
@action(
detail=False,
methods=["GET"],
filter_class=DirtyMachinePoolFilter,
queryset=DirtyMachine.objects.all().order_by("-update_at"),
)
def query_machine_pool(self, request):
machine_qs = self.paginate_queryset(self.filter_queryset(self.get_queryset()))
# 查询主机池主机信息
Expand Down
9 changes: 2 additions & 7 deletions dbm-ui/backend/db_services/dbresource/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,17 +81,16 @@ class Meta:

class ResourceListSerializer(serializers.Serializer):
for_biz = serializers.IntegerField(help_text=_("专属业务"), required=False)
for_bizs = serializers.ListField(help_text=_("专属业务列表"), child=serializers.IntegerField(), required=False)
resource_type = serializers.CharField(help_text=_("专属DB"), required=False, allow_null=True, allow_blank=True)
resource_types = serializers.ListField(help_text=_("专属DB列表"), child=serializers.CharField(), required=False)
device_class = serializers.CharField(help_text=_("机型"), required=False)
hosts = serializers.CharField(help_text=_("主机IP列表"), required=False)
bk_cloud_ids = serializers.CharField(help_text=_("云区域ID列表"), required=False)
city = serializers.CharField(help_text=_("城市"), required=False)
subzones = serializers.CharField(help_text=_("园区"), required=False)
subzone_ids = serializers.CharField(help_text=_("园区ID"), required=False)

set_empty_biz = serializers.BooleanField(help_text=_("是否无专用业务"), required=False, default=False)
set_empty_resource_type = serializers.BooleanField(help_text=_("是否无专用资源类型"), required=False, default=False)

os_type = serializers.CharField(help_text=_("操作系统类型"), required=False)
cpu = serializers.CharField(help_text=_("cpu资源限制"), required=False)
mem = serializers.CharField(help_text=_("内存资源限制"), required=False)
Expand All @@ -108,10 +107,6 @@ class ResourceListSerializer(serializers.Serializer):

@staticmethod
def format_fields(attrs, fields):
# 如果没有专用业务和专用DB,则无限制查询
attrs["set_empty_biz"] = "for_biz" not in attrs
attrs["set_empty_resource_type"] = "resource_type" not in attrs

# 用逗号方便前端URL渲染,这里统一转换为数组 or obj
for field in fields:
divider = "-" if field in ["cpu", "mem", "disk"] else ","
Expand Down
2 changes: 1 addition & 1 deletion dbm-ui/backend/db_services/mysql/cluster/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ class ClusterViewSet(BaseClusterViewSet):
default_permission_class = [DBManagePermission()]

@common_swagger_auto_schema(
operation_summary=_("通过过滤条件批量查询集群"),
operation_summary=_("通过过滤条件批量查询集群[Deprecated!! 这个方法将被移除,请不要调用]"),
request_body=QueryClustersRequestSerializer(),
tags=[SWAGGER_TAG],
responses={status.HTTP_200_OK: QueryClustersResponseSerializer()},
Expand Down
4 changes: 2 additions & 2 deletions dbm-ui/backend/ticket/builders/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,8 +277,8 @@ class RecycleParamBuilder(FlowParamBuilder):
DBType.Hdfs.value: "hdfs.HdfsController.hdfs_machine_clear_scene",
DBType.Pulsar.value: "pulsar.PulsarController.pulsar_machine_clear_scene",
DBType.Vm.value: "vm.VmController.vm_machine_clear_scene",
# TODO redis,sqlserver,mongo清理流程暂时没有
DBType.Redis.value: "",
DBType.Redis.value: "redis.RedisController.redis_dirty_machine_clear",
# TODO sqlserver,mongo清理流程暂时没有
DBType.Sqlserver.value: "",
DBType.MongoDB.value: "",
}
Expand Down
4 changes: 2 additions & 2 deletions dbm-ui/backend/ticket/builders/common/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def remove_useless_spec(attrs: Dict[str, Any]) -> Dict[str, Any]:
return attrs

for role, spec in attrs["resource_spec"].items():
if spec and spec["count"]:
if spec and spec.get("count"):
real_resource_spec[role] = spec

attrs["resource_spec"] = real_resource_spec
Expand Down Expand Up @@ -223,7 +223,7 @@ def validated_cluster_type(cls, cluster_ids: List[int], cluster_type: ClusterTyp
def validate_instance_role(cls, inst_list: List[Dict], role: Union[AccessLayer, InstanceInnerRole]):
"""校验实例角色类型"""

inst_filters = reduce(operator.or_, [Q(machine__ip=inst["ip"], port=inst["port"]) for inst in inst_list])
inst_filters = reduce(operator.or_, [Q(machine__ip=inst["ip"]) for inst in inst_list])
check_role_info = list(
StorageInstance.objects.annotate(role=F("instance_inner_role"))
.filter(inst_filters)
Expand Down
10 changes: 6 additions & 4 deletions dbm-ui/backend/ticket/builders/common/bigdata.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,11 +200,13 @@ def validate(self, attrs):

for role in old_nodes:
old_role_num = len(old_nodes[role])
new_role_num = (
len(new_nodes[role]) if attrs["ip_source"] == IpSource.MANUAL_INPUT else new_nodes[role]["count"]
)
if attrs["ip_source"] == IpSource.MANUAL_INPUT:
new_role_num = len(new_nodes[role])
else:
# 兼容资源池自动匹配和手动输入的场景
new_role_num = new_nodes[role].get("count") or len(new_nodes[role]["hosts"])
if old_role_num != new_role_num:
raise serializers.ValidationError(_("角色{}替换前后数量不一致,请保证替换前后角色类型和数量一致!").format(role))
raise serializers.ValidationError(_("角色{}替换前后数量不一致").format(role))

# 判断主机是否来自手工输入,从资源池拿到的主机不需要校验
if attrs["ip_source"] == IpSource.RESOURCE_POOL:
Expand Down
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/es/es_destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,15 @@
from backend.db_meta.enums import ClusterPhase
from backend.flow.engine.controller.es import EsController
from backend.ticket import builders
from backend.ticket.builders.common.base import HostRecycleSerializer
from backend.ticket.builders.common.bigdata import BaseEsTicketFlowBuilder, BigDataTakeDownDetailSerializer
from backend.ticket.constants import TicketType

logger = logging.getLogger("root")


class EsDestroyDetailSerializer(BigDataTakeDownDetailSerializer):
    """Detail serializer for ES cluster destroy tickets.

    Extends the common big-data take-down payload with host-recycle
    information so destroyed machines can be returned to the pool.
    """

    # Host recycle parameters; the default lets callers omit the field.
    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)


class EsDestroyFlowParamBuilder(builders.FlowParamBuilder):
Expand Down
1 change: 1 addition & 0 deletions dbm-ui/backend/ticket/builders/es/es_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,3 +52,4 @@ class EsReplaceFlowBuilder(BaseEsTicketFlowBuilder):
inner_flow_builder = EsReplaceFlowParamBuilder
inner_flow_name = _("ES集群替换")
resource_apply_builder = EsReplaceResourceParamBuilder
need_patch_recycle_host_details = True
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,15 @@
from backend.db_meta.enums import ClusterPhase
from backend.flow.engine.controller.hdfs import HdfsController
from backend.ticket import builders
from backend.ticket.builders.common.base import HostRecycleSerializer
from backend.ticket.builders.common.bigdata import BaseHdfsTicketFlowBuilder, BigDataTakeDownDetailSerializer
from backend.ticket.constants import TicketType

logger = logging.getLogger("root")


class HdfsDestroyDetailSerializer(BigDataTakeDownDetailSerializer):
    """Detail serializer for HDFS cluster destroy tickets.

    Extends the common big-data take-down payload with host-recycle
    information so destroyed machines can be returned to the pool.
    """

    # Host recycle parameters; the default lets callers omit the field.
    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)


class HdfsDestroyFlowParamBuilder(builders.FlowParamBuilder):
Expand Down
1 change: 1 addition & 0 deletions dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,3 +55,4 @@ class HdfsReplaceFlowBuilder(BaseHdfsTicketFlowBuilder):
inner_flow_builder = HdfsReplaceFlowParamBuilder
inner_flow_name = _("HDFS 集群替换")
resource_apply_builder = HdfsResourceParamBuilder
need_patch_recycle_host_details = True
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,15 @@
from backend.db_meta.enums import ClusterPhase
from backend.flow.engine.controller.kafka import KafkaController
from backend.ticket import builders
from backend.ticket.builders.common.base import HostRecycleSerializer
from backend.ticket.builders.common.bigdata import BaseKafkaTicketFlowBuilder, BigDataTakeDownDetailSerializer
from backend.ticket.constants import TicketType

logger = logging.getLogger("root")


class KafkaDestroyDetailSerializer(BigDataTakeDownDetailSerializer):
    """Detail serializer for Kafka cluster destroy tickets.

    Extends the common big-data take-down payload with host-recycle
    information so destroyed machines can be returned to the pool.
    """

    # Host recycle parameters; the default lets callers omit the field.
    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)


class KafkaDestroyFlowParamBuilder(builders.FlowParamBuilder):
Expand Down
1 change: 1 addition & 0 deletions dbm-ui/backend/ticket/builders/kafka/kafka_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,3 +46,4 @@ class KafkaReplaceFlowBuilder(BaseKafkaTicketFlowBuilder):
inner_flow_builder = KafkaReplaceFlowParamBuilder
inner_flow_name = _("Kafka 集群替换")
resource_apply_builder = KafkaReplaceResourceParamBuilder
need_patch_recycle_host_details = True
3 changes: 3 additions & 0 deletions dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,9 @@ def post_callback(self):
else:
info["bk_new_master"], info["bk_new_slave"] = info.pop("new_master")[0], info.pop("new_slave")[0]
info["new_master_ip"], info["new_slave_ip"] = info["bk_new_master"]["ip"], info["bk_new_slave"]["ip"]
# 修改规格key值
info["resource_spec"]["remote"] = info["resource_spec"].pop("master")
info["resource_spec"].pop("slave")
next_flow.save(update_fields=["details"])


Expand Down
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,15 @@
from backend.db_meta.enums import ClusterPhase
from backend.flow.engine.controller.pulsar import PulsarController
from backend.ticket import builders
from backend.ticket.builders.common.base import HostRecycleSerializer
from backend.ticket.builders.common.bigdata import BasePulsarTicketFlowBuilder, BigDataTakeDownDetailSerializer
from backend.ticket.constants import TicketType

logger = logging.getLogger("root")


class PulsarDestroyDetailSerializer(BigDataTakeDownDetailSerializer):
    """Detail serializer for Pulsar cluster destroy tickets.

    Extends the common big-data take-down payload with host-recycle
    information so destroyed machines can be returned to the pool.
    """

    # Host recycle parameters; the default lets callers omit the field.
    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)


class PulsarDestroyFlowParamBuilder(builders.FlowParamBuilder):
Expand Down
1 change: 1 addition & 0 deletions dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,4 @@ class PulsarReplaceFlowBuilder(BasePulsarTicketFlowBuilder):
inner_flow_builder = PulsarReplaceFlowParamBuilder
inner_flow_name = _("Pulsar 集群替换")
resource_apply_builder = PulsarReplaceResourceParamBuilder
need_patch_recycle_host_details = True
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,7 @@
from rest_framework import serializers

from backend.configuration.constants import AffinityEnum
from backend.db_meta.models import Cluster
from backend.db_services.dbbase.constants import IpSource
from backend.db_services.dbresource.handlers import ResourceHandler
from backend.flow.consts import RedisCapacityUpdateType
from backend.flow.engine.controller.redis import RedisController
from backend.ticket import builders
Expand Down Expand Up @@ -92,18 +90,7 @@ class RedisScaleUpDownFlowBuilder(BaseRedisTicketFlowBuilder):
inner_flow_builder = RedisScaleUpDownParamBuilder
inner_flow_name = _("Redis 集群容量变更")
resource_batch_apply_builder = RedisScaleUpDownResourceParamBuilder

def patch_down_cluster_hosts(self):
"""针对全部全部机器替换,获取所有的下架机器"""
cluster_ids = [
info["cluster_id"]
for info in self.ticket.details["infos"]
if info["update_mode"] == RedisCapacityUpdateType.ALL_MACHINES_REPLACE
]
recycle_hosts = Cluster.get_cluster_related_machines(cluster_ids)
recycle_hosts = [{"bk_host_id": host_id} for host_id in recycle_hosts]
self.ticket.details["recycle_hosts"] = ResourceHandler.standardized_resource_host(recycle_hosts)
need_patch_recycle_host_details = True

def patch_ticket_detail(self):
self.patch_down_cluster_hosts()
super().patch_ticket_detail()
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ class MNTApplySerializer(serializers.Serializer):

infos = serializers.ListField(help_text=_("添加spider运维节点信息"), child=MNTApplySerializer())
ip_source = serializers.ChoiceField(
help_text=_("机器导入类型"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT
help_text=_("机器导入类型"), choices=IpSource.get_choices(), required=False, default=IpSource.RESOURCE_POOL
)

def validate(self, attrs):
Expand Down
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/vm/vm_destroy.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,15 @@
from backend.db_meta.enums import ClusterPhase
from backend.flow.engine.controller.vm import VmController
from backend.ticket import builders
from backend.ticket.builders.common.base import HostRecycleSerializer
from backend.ticket.builders.common.bigdata import BaseVmTicketFlowBuilder, BigDataTakeDownDetailSerializer
from backend.ticket.constants import TicketType

logger = logging.getLogger("root")


class VmDestroyDetailSerializer(BigDataTakeDownDetailSerializer):
    """Detail serializer for VictoriaMetrics cluster destroy tickets.

    Extends the common big-data take-down payload with host-recycle
    information so destroyed machines can be returned to the pool.
    """

    # Host recycle parameters; the default lets callers omit the field.
    ip_recycle = HostRecycleSerializer(help_text=_("主机回收信息"), default=HostRecycleSerializer.DEFAULT)


class VmDestroyFlowParamBuilder(builders.FlowParamBuilder):
Expand Down
3 changes: 2 additions & 1 deletion dbm-ui/backend/ticket/builders/vm/vm_replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,10 @@ def post_callback(self):
next_flow.save(update_fields=["details"])


# is_recycle=True so the factory links a host-recycle flow to this ticket type.
@builders.BuilderFactory.register(TicketType.VM_REPLACE, is_apply=True, is_recycle=True)
class VmReplaceFlowBuilder(BaseVmTicketFlowBuilder):
    """Ticket flow builder for VictoriaMetrics cluster node replacement."""

    serializer = VmReplaceDetailSerializer
    inner_flow_builder = VmReplaceFlowParamBuilder
    inner_flow_name = _("VictoriaMetrics 集群替换")
    resource_apply_builder = VmReplaceResourceParamBuilder
    # Patch recycled-host details into the ticket so replaced machines are reclaimed.
    need_patch_recycle_host_details = True
11 changes: 9 additions & 2 deletions dbm-ui/backend/ticket/flow_manager/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from backend.ticket.flow_manager.resource import ResourceApplyFlow, ResourceBatchApplyFlow, ResourceDeliveryFlow
from backend.ticket.flow_manager.timer import TimerFlow
from backend.ticket.models import Ticket
from backend.ticket.tasks.ticket_tasks import create_recycle_ticket

SUPPORTED_FLOW_MAP = {
FlowType.BK_ITSM.value: ItsmFlow,
Expand Down Expand Up @@ -104,13 +105,19 @@ def update_ticket_status(self):
return
origin_status, ticket.status = ticket.status, target_status
ticket.save(update_fields=["status", "update_at"])
self.ticket_status_trigger(origin_status, target_status)

# 执行状态更新钩子函数
self.ticket_status_trigger(origin_status, target_status)

def ticket_status_trigger(self, origin_status, target_status):
    """
    Hook executed after the ticket status has been updated.

    NOTE: hooks that are not on the critical path should be dispatched
    asynchronously.

    :param origin_status: ticket status before the update
    :param target_status: ticket status after the update
    """
    # Send a notification when the ticket status changes.
    # Skip RUNNING: transitioning into a built-in task needs no notice; the
    # notification fires only when the "to be continued" todo is created.
    # Skip RESOURCE_REPLENISH: the status always flows here at the resource
    # apply node, but only the creation of the replenish todo should notify.
    if target_status not in [TicketStatus.RUNNING, TicketStatus.RESOURCE_REPLENISH]:
        notify.send_msg.apply_async(args=(self.ticket.id,))

    # When an inner flow is terminated, trigger host recycling via a linked ticket.
    if target_status == TicketStatus.TERMINATED and self.current_flow_obj.flow_type == FlowType.INNER_FLOW:
        create_recycle_ticket.apply_async(args=(self.ticket.id,))
2 changes: 1 addition & 1 deletion dbm-ui/backend/ticket/flow_manager/resource.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def apply_resource(self, ticket_data):

# groups_in_same_location只在同城同园区亲和性下才成效,保证所有组申请的机器都在同园区
# 目前所有组亲和性相同,任取一个判断即可
affinity = apply_params["details"][0]["affinity"]
affinity = apply_params["details"][0].get("affinity", AffinityEnum.NONE)
if affinity in [AffinityEnum.SAME_SUBZONE, AffinityEnum.SAME_SUBZONE_CROSS_SWTICH]:
apply_params.update(groups_in_same_location=True)

Expand Down
Loading

0 comments on commit e28a318

Please sign in to comment.