修复毛坯库业务接口逻辑

This commit is contained in:
靳中伟 2025-08-15 10:58:25 +08:00
parent 7ef023842b
commit b3015bfb37
12 changed files with 7428 additions and 2668 deletions

Binary file not shown.

File diff suppressed because it is too large Load Diff

2410
logs/app.log.2025-08-13 Normal file

File diff suppressed because it is too large Load Diff

View File

@ -100,7 +100,7 @@ async def call_agv_goods_move_callback(pid: str, user_id: str = "000307") -> boo
"UserID": user_id
}
max_retries = 1000 # 最大重试次数,防止无限循环
max_retries = 100 # 最大重试次数,防止无限循环
retry_count = 0
while retry_count < max_retries:
@ -453,7 +453,7 @@ async def create_new_task(request: Request, task_request: ExternalTaskRequest =
storage_area = TASK_TYPE_AREA.get(TASK_TYPE_TEMPLATE_MAPPING.get(task_request.TaskType))
priority = TASK_TYPE_PRIORITY.get(TASK_TYPE_TEMPLATE_MAPPING.get(task_request.TaskType, "OR"))
remark = TASK_TYPE_REMARK.get(task_request.TaskType)
remark = remark.format(storage_area, target_storage_location_id)
remark = remark.format(target_storage_location_id, storage_area)
area_obj = TaskInputParamNew(
name="area",
type=InputParamType.STRING,
@ -483,7 +483,23 @@ async def create_new_task(request: Request, task_request: ExternalTaskRequest =
required=True,
defaultValue=remark,
remark="任务详情备注")
task_params = [area_obj, target_obj, priority_obj, remark_obj]
req_code = TaskInputParamNew(
name="req_code",
type=InputParamType.STRING,
label="请求码",
required=True,
defaultValue=task_request.ReqCode,
remark="任务请求的唯一编码")
task_type = TaskInputParamNew(
name="task_type",
type=InputParamType.STRING,
label="任务类型",
required=True,
defaultValue=task_request.TaskType,
remark="任务类型")
task_params = [area_obj, target_obj, priority_obj, remark_obj, req_code, task_type]
# 构造任务执行请求
run_request = TaskEditRunRequest(
@ -561,15 +577,6 @@ async def create_new_task(request: Request, task_request: ExternalTaskRequest =
response_row_count=1,
response_data=result
)
logger.info(f"任务启动成功: ReqCode={task_request.ReqCode}, TaskRecordId={task_record_id}, task_request.TaskType={task_request.TaskType}")
# 启动异步任务监控,不阻塞当前接口
if task_record_id and task_request.TaskType in ["GTFK2MP", "GGFK2MP"] and TF_WEB_POST:
asyncio.create_task(monitor_task_and_callback(
task_record_id=task_record_id,
req_code=task_request.ReqCode
))
return ExternalTaskResponse(
code=0,
reqCode=task_request.ReqCode,
@ -717,7 +724,7 @@ async def gen_agv_scheduling_task(request: Request, task_request: GenAgvScheduli
# 构造任务运行参数
task_params = []
remark = remark.format(start_node, end_node)
remark = remark.format(start_storage_location_id, end_storage_location_id)
# 添加任务代码参数
task_params.append(TaskInputParamNew(
name="START_WL",

View File

@ -113,6 +113,7 @@ async def set_task_error(
包含设置结果的响应包括任务状态错误原因更新的任务块数量等信息
"""
try:
logger.info("set-error 请求参数:"+str(request_data)+", task_record_id:"+str(task_record_id))
# 获取错误原因
error_reason = request_data.error_reason if request_data else "未知错误"

View File

@ -1,5 +1,7 @@
import asyncio
from typing import Dict, Any
EXTERNAL_CALLBACK_URL = "http://roh.vwfawedl.mobi:9001/AGVService/ContainerSendBackRequest" # 生产线到毛坯库任务
async def test1(a: int, b: int) -> int:
return {"name": a + b}
@ -91,107 +93,105 @@ async def validate_task_condition(function_args: Dict[str, Any]) -> Dict[str, An
template_type = TASK_TYPE_TEMPLATE_MAPPING.get(task_type, "")
logger.info(f"TaskCode={task_code}, TaskType={task_type}, TemplateType={template_type}")
# 如果是GT类型需要验证end_node库位是否解锁
if template_type == "GT":
if not end_node:
return {
"success": False,
"message": "GT类型任务需要提供end_node参数"
}
logger.info(f"GT类型任务需要验证end_node库位解锁状态: TaskCode={task_code}")
# 验证end_node对应的库位是否解锁
while True:
try:
async with get_async_session() as session:
# 查询end_node对应的库位锁定状态
stmt = select(OperatePointLayer).where(
OperatePointLayer.layer_name == end_node,
OperatePointLayer.is_deleted == False
).limit(1)
result = await session.execute(stmt)
end_layer = result.scalar_one_or_none()
task_detail_result = await TaskRecordService.get_task_record_detail(
latest_record.task_record_id)
task_detail = task_detail_result.get("data", {})
task_status = task_detail.get("status", "")
if task_status == TaskStatus.CANCELED:
return {
"success": True,
"message": f"任务被取消: TaskCode={task_code}, Status={task_status}"
}
if end_layer:
if not end_layer.is_locked:
logger.info(f"GT类型任务end_node库位已解锁可以执行: TaskCode={task_code}, end_node={end_node}")
return {
"success": True,
"message": f"GT类型任务验证通过end_node库位已解锁: {end_node}"
}
else:
logger.info(f"GT类型任务end_node库位被锁定等待解锁: TaskCode={task_code}, end_node={end_node}, locked_by={end_layer.locked_by}")
await asyncio.sleep(2) # 等待2秒后重试
else:
logger.warning(f"GT类型任务未找到end_node对应的库位继续执行: TaskCode={task_code}, end_node={end_node}")
return {
"success": True,
"message": f"GT类型任务验证通过未找到对应库位继续执行: {end_node}"
}
except Exception as e:
logger.error(f"GT类型任务检查end_node库位锁定状态时出现异常: {str(e)}, TaskCode={task_code}, end_node={end_node}")
await asyncio.sleep(2) # 等待2秒后重试
else:
# 非GT类型需要等待任务完成
logger.info(f"非GT类型任务需要等待关联任务完成: TaskCode={task_code}")
wait_count = 0
while True:
# 调用get_task_record_detail查询任务运行状态
task_detail_result = await TaskRecordService.get_task_record_detail(latest_record.task_record_id)
if task_detail_result.get("success", False):
while True:
try:
async with get_async_session() as session:
# 查询end_node对应的库位锁定状态
stmt = select(OperatePointLayer).where(
OperatePointLayer.layer_name == end_node,
OperatePointLayer.is_deleted == False
).limit(1)
result = await session.execute(stmt)
end_layer = result.scalar_one_or_none()
task_detail_result = await TaskRecordService.get_task_record_detail(
latest_record.task_record_id)
task_detail = task_detail_result.get("data", {})
task_status = task_detail.get("status", "")
logger.info(f"检查任务状态: TaskCode={task_code}, Status={task_status}, WaitCount={wait_count}")
# 如果任务已完成(成功)
if task_status == TaskStatus.COMPLETED:
logger.info(f"关联任务已完成继续执行AGV调度任务: TaskCode={task_code}")
return {
"success": True,
"message": f"任务验证通过,关联任务已完成: {task_code}"
}
# 如果任务已失败
elif task_status == TaskStatus.FAILED:
logger.error(f"关联任务执行失败: TaskCode={task_code}, Status={task_status}")
return {
"success": False,
"message": f"关联任务执行失败: TaskCode={task_code}, Status={task_status}"
}
elif task_status == TaskStatus.CANCELED:
if task_status == TaskStatus.CANCELED:
return {
"success": True,
"message": f"任务被取消: TaskCode={task_code}, Status={task_status}"
}
# 任务还在运行中,继续等待
if end_layer:
if not end_layer.is_locked:
return {
"success": True,
"message": f"任务验证通过end_node库位已解锁: {end_node}"
}
else:
logger.info(f"任务end_node库位被锁定等待解锁: TaskCode={task_code}, end_node={end_node}, locked_by={end_layer.locked_by}")
await asyncio.sleep(2) # 等待2秒后重试
else:
logger.info(f"任务仍在执行中,继续等待: TaskCode={task_code}, Status={task_status}")
await asyncio.sleep(1) # 等待1秒
wait_count += 1
else:
logger.warning(f"无法获取任务详情,继续等待: TaskCode={task_code}, TaskRecordId={latest_record.task_record_id}")
await asyncio.sleep(1) # 等待1秒
wait_count += 1
logger.warning(f"任务未找到end_node对应的库位继续执行: TaskCode={task_code}, end_node={end_node}")
return {
"success": True,
"message": f"任务验证通过,未找到对应库位,继续执行: {end_node}"
}
except Exception as e:
logger.error(f"任务检查end_node库位锁定状态时出现异常: {str(e)}, TaskCode={task_code}, end_node={end_node}")
await asyncio.sleep(2) # 等待2秒后重试
except Exception as e:
logger.error(f"任务状态验证异常: {str(e)}")
return {
"success": False,
"message": f"任务状态验证异常: {str(e)}"
}
}
async def call_external_callback(req_code: str, task_type: str, arrival_user: str = "000307") -> dict:
    """Post an empty-pallet arrival callback to the external AGV service.

    Only empty-pallet return task types (GTFK2MP / GGFK2MP) trigger the
    callback; any other task type is a no-op.  The external endpoint at
    ``EXTERNAL_CALLBACK_URL`` is polled until it answers ``result == "0"``
    (success) or the retry budget is exhausted.

    Args:
        req_code: arrival number (ReqCode) used as the external ``arrival_no``.
        task_type: task type code; only "GTFK2MP"/"GGFK2MP" are posted.
        arrival_user: arrival user id (fixed value "000307" by default).

    Returns:
        dict: a ``{"message": ...}`` payload describing the outcome.
        (The original docstring claimed ``bool``, which did not match the
        implementation — every code path returns a dict.)
    """
    # Local imports: aiohttp and the project logger are only needed when this
    # helper actually runs, and keep the module importable without them.
    import aiohttp
    from utils.logger import get_logger
    logger = get_logger("scripts.user_save.test1")

    if task_type not in ["GTFK2MP", "GGFK2MP"]:
        return {"message": "不是返空类型 不需要过账"}

    arrival_no = req_code
    payload = {
        "arrival_no": arrival_no,
        "arrival_user": arrival_user
    }
    max_retries = 100  # cap retries to avoid an unbounded loop
    retry_count = 0
    # Per-request timeout so a hung external service cannot block this
    # coroutine forever (the original had no timeout at all).
    request_timeout = aiohttp.ClientTimeout(total=30)
    while retry_count < max_retries:
        try:
            async with aiohttp.ClientSession(timeout=request_timeout) as sessions:
                async with sessions.post(EXTERNAL_CALLBACK_URL, json=payload) as response:
                    result = await response.json()
                    logger.info(f"外部接口调用响应: {result}, arrival_no={arrival_no}, 重试次数={retry_count}")
                    # result == "0": posted successfully, done.
                    if result.get("result") == "0":
                        logger.info(f"外部接口调用成功: arrival_no={arrival_no}, 总重试次数={retry_count}")
                        return {"message": "空托盘过账成功!"}
                    # result == "1": external side not ready yet — retry.
                    elif result.get("result") == "1":
                        logger.info(f"外部接口返回result=1继续重试: arrival_no={arrival_no}, 重试次数={retry_count}")
                        retry_count += 1
                        await asyncio.sleep(5)  # wait 5s before retrying
                    # Any other value is unexpected — log and retry.
                    else:
                        logger.error(f"外部接口返回异常结果: {result}, arrival_no={arrival_no}")
                        retry_count += 1
                        await asyncio.sleep(5)
        except Exception as e:
            # Network / JSON errors count against the same retry budget.
            logger.error(f"调用外部接口异常: {str(e)}, arrival_no={arrival_no}, 重试次数={retry_count}")
            retry_count += 1
            await asyncio.sleep(5)  # wait 5s before retrying

    logger.error(f"外部接口调用失败,已达到最大重试次数: arrival_no={arrival_no}, 最大重试次数={max_retries}")
    return {"message": "空托盘过账失败,重新尝试次数达到最大值"}

View File

@ -751,7 +751,7 @@ class GetIdleCrowdedSiteBlockHandler(StorageBlockHandler):
for area_name in group_names:
logger.info(f"正在查询库区: {area_name}")
# 原子性地获取库区信息,确定选择库位的逻辑
# 原子性地获取库区信息,确定选择库位的逻辑,使用行锁确保获取最新数据
area_query = select(StorageArea).where(
StorageArea.area_name == area_name,
StorageArea.scene_id == map_id,
@ -768,7 +768,7 @@ class GetIdleCrowdedSiteBlockHandler(StorageBlockHandler):
existing_area_names.append(area_name)
processed_any_area = True
# 检查库区是否有锁定的库位
# 检查库区是否有锁定的库位,使用行锁确保获取最新状态
locked_count_query = select(OperatePointLayer).where(
OperatePointLayer.area_name == area_name,
OperatePointLayer.scene_id == map_id,
@ -831,7 +831,7 @@ class GetIdleCrowdedSiteBlockHandler(StorageBlockHandler):
try:
import datetime
# 获取库区内所有库位,用于确定禁用库位的影响范围和排序
# 获取库区内所有库位,用于确定禁用库位的影响范围和排序,使用行锁确保获取最新状态
all_layers_query = select(OperatePointLayer).where(
OperatePointLayer.area_name == area_name,
OperatePointLayer.scene_id == map_id,
@ -903,7 +903,7 @@ class GetIdleCrowdedSiteBlockHandler(StorageBlockHandler):
else:
base_conditions.append(OperatePointLayer.is_occupied == False)
# 查询候选库位用于排序
# 查询候选库位用于排序,使用行锁确保获取最新状态
candidate_query = select(OperatePointLayer).where(and_(*base_conditions))
candidate_result = await session.execute(candidate_query)
candidate_layers = candidate_result.scalars().all()
@ -1335,7 +1335,7 @@ class GetIdleSiteBlockHandler(StorageBlockHandler):
if group_name:
conditions.append(OperatePointLayer.area_name == group_name)
# print(conditions, "====================")
# 查询候选库位,同时验证存在性
# 查询候选库位,同时验证存在性,使用行锁确保获取最新状态
candidate_query = select(OperatePointLayer).where(and_(*conditions))
# print(candidate_query)
# print(input_params.get("filled"), "------------------")
@ -2458,8 +2458,8 @@ class SetSiteLockedBlockHandler(StorageBlockHandler):
解决并发任务间库位状态同步问题
"""
try:
# 先检查库位是否存在
async with get_async_session_read_committed() as session:
# 先检查库位是否存在
site_check_query = select(OperatePointLayer).where(
OperatePointLayer.layer_name == site_id,
OperatePointLayer.scene_id == map_id
@ -2472,169 +2472,174 @@ class SetSiteLockedBlockHandler(StorageBlockHandler):
"success": False,
"message": f"库位不存在: {site_id}"
}
# 实现重试逻辑
attempts = 0
retry_sleep = 1 # 默认重试间隔为1秒
# 区分两种模式:有限重试和无限重试(阻塞模式)
if retry_times is None:
# 无限重试模式(阻塞模式)
logger.info(f"库位 {site_id} 进入阻塞模式,将持续等待直到获取锁定")
while True:
attempts += 1
# 检查任务是否被取消
if context:
is_canceled = await context.is_task_canceled_async()
if context.is_failed:
return {
"success": False,
"message": f"任务已失败,停止等待锁定库位: {context.failure_reason}",
"is_failed": True,
"is_canceled": is_canceled
}
if is_canceled:
logger.info(f"检测到任务已被取消,停止等待锁定库位 {site_id}")
return {
"success": True,
"message": "任务已被取消,停止等待锁定库位",
"is_canceled": True
}
# 重新查询库位当前状态
# 实现重试逻辑
attempts = 0
retry_sleep = 1 # 默认重试间隔为1秒
# 区分两种模式:有限重试和无限重试(阻塞模式)
if retry_times is None:
# 无限重试模式(阻塞模式)
logger.info(f"库位 {site_id} 进入阻塞模式,将持续等待直到获取锁定")
while True:
attempts += 1
# 检查任务是否被取消
if context:
is_canceled = await context.is_task_canceled_async()
if context.is_failed:
return {
"success": False,
"message": f"任务已失败,停止等待锁定库位: {context.failure_reason}",
"is_failed": True,
"is_canceled": is_canceled
}
if is_canceled:
logger.info(f"检测到任务已被取消,停止等待锁定库位 {site_id}")
return {
"success": True,
"message": "任务已被取消,停止等待锁定库位",
"is_canceled": True
}
# 使用新会话确保获取最新的已提交数据(无限重试模式)
# 重新查询库位当前状态
async with get_async_session_read_committed() as fresh_session:
current_site_query = select(OperatePointLayer).where(
OperatePointLayer.layer_name == site_id,
OperatePointLayer.scene_id == map_id,
OperatePointLayer.is_disabled == False,
)
current_site_result = await session.execute(current_site_query)
current_site_result = await fresh_session.execute(current_site_query)
current_layer = current_site_result.scalar_one_or_none()
if not current_layer:
return {
"success": False,
"message": f"库位不存在: {site_id}"
}
# 检查库位是否已被锁定
if current_layer.is_locked:
# 如果是同一个锁定者,视为已锁定成功
if current_layer.locked_by == locked_id:
logger.info(f"库位 {site_id} 已被同一锁定者 {locked_id} 锁定")
return {
"success": True,
"message": f"库位已被同一锁定者锁定库位ID: {site_id}"
}
# 被其他锁定者锁定,继续等待
logger.info(f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试(阻塞模式)")
await asyncio.sleep(retry_sleep)
continue
# 库位未被锁定,尝试锁定
lock_result = await self._attempt_lock_site(session, site_id, locked_id, map_id)
if lock_result["success"]:
success_msg = f"{attempts}次尝试锁定库位成功阻塞模式库位ID: {site_id}" if attempts > 1 else f"锁定库位成功阻塞模式库位ID: {site_id}"
logger.info(success_msg)
if not current_layer:
return {
"success": False,
"message": f"库位不存在: {site_id}"
}
# 检查库位是否已被锁定
if current_layer.is_locked:
# 如果是同一个锁定者,视为已锁定成功
if current_layer.locked_by == locked_id:
logger.info(f"库位 {site_id} 已被同一锁定者 {locked_id} 锁定")
return {
"success": True,
"message": success_msg
"message": f"库位已被同一锁定者锁定库位ID: {site_id}"
}
else:
# 锁定失败,继续重试
logger.info(f"库位 {site_id} 锁定失败(可能被其他进程抢占),第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试(阻塞模式)")
await asyncio.sleep(retry_sleep)
continue
else:
# 有限重试模式
max_attempts = retry_times + 1 # 包括首次尝试
logger.info(f"库位 {site_id} 进入有限重试模式,最多重试 {retry_times}")
while attempts < max_attempts:
attempts += 1
# 检查任务是否被取消
if context:
is_canceled = await context.is_task_canceled_async()
if context.is_failed:
return {
"success": False,
"message": f"任务已失败,停止重试锁定库位: {context.failure_reason}",
"is_failed": True,
"is_canceled": is_canceled
}
if is_canceled:
logger.info(f"检测到任务已被取消,停止重试锁定库位 {site_id}")
return {
"success": False,
"message": "任务已被取消,停止重试锁定库位",
"is_canceled": True
}
# 重新查询库位当前状态
# 被其他锁定者锁定,继续等待
logger.info(f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试(阻塞模式)")
await asyncio.sleep(retry_sleep)
continue
# 库位未被锁定,尝试锁定(使用新会话)
async with get_async_session_read_committed() as lock_session:
lock_result = await self._attempt_lock_site(lock_session, site_id, locked_id, map_id)
if lock_result["success"]:
success_msg = f"{attempts}次尝试锁定库位成功阻塞模式库位ID: {site_id}" if attempts > 1 else f"锁定库位成功阻塞模式库位ID: {site_id}"
logger.info(success_msg)
return {
"success": True,
"message": success_msg
}
else:
# 锁定失败,继续重试
logger.info(f"库位 {site_id} 锁定失败(可能被其他进程抢占),第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试(阻塞模式)")
await asyncio.sleep(retry_sleep)
continue
else:
# 有限重试模式
max_attempts = retry_times + 1 # 包括首次尝试
logger.info(f"库位 {site_id} 进入有限重试模式,最多重试 {retry_times}")
while attempts < max_attempts:
attempts += 1
# 检查任务是否被取消
if context:
is_canceled = await context.is_task_canceled_async()
if context.is_failed:
return {
"success": False,
"message": f"任务已失败,停止重试锁定库位: {context.failure_reason}",
"is_failed": True,
"is_canceled": is_canceled
}
if is_canceled:
logger.info(f"检测到任务已被取消,停止重试锁定库位 {site_id}")
return {
"success": False,
"message": "任务已被取消,停止重试锁定库位",
"is_canceled": True
}
# 使用新会话确保获取最新的已提交数据(有限重试模式)
# 重新查询库位当前状态
async with get_async_session_read_committed() as fresh_session:
current_site_query = select(OperatePointLayer).where(
OperatePointLayer.layer_name == site_id,
OperatePointLayer.scene_id == map_id,
OperatePointLayer.is_disabled == False,
)
current_site_result = await session.execute(current_site_query)
current_site_result = await fresh_session.execute(current_site_query)
current_layer = current_site_result.scalar_one_or_none()
if not current_layer:
return {
"success": False,
"message": f"库位不存在: {site_id}"
}
# 检查库位是否已被锁定
if current_layer.is_locked:
# 如果是同一个锁定者,视为已锁定成功
if current_layer.locked_by == locked_id:
logger.info(f"库位 {site_id} 已被同一锁定者 {locked_id} 锁定")
return {
"success": True,
"message": f"库位已被同一锁定者锁定库位ID: {site_id}"
}
# 被其他锁定者锁定,需要重试
if attempts < max_attempts:
logger.info(f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试")
await asyncio.sleep(retry_sleep)
continue
else:
# 达到最大重试次数,返回失败
return {
"success": False,
"message": f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,重试 {retry_times} 次后仍然失败"
}
# 库位未被锁定,尝试锁定
lock_result = await self._attempt_lock_site(session, site_id, locked_id, map_id)
if lock_result["success"]:
success_msg = f"{attempts}次尝试锁定库位成功库位ID: {site_id}" if attempts > 1 else f"锁定库位成功库位ID: {site_id}"
logger.info(success_msg)
if not current_layer:
return {
"success": False,
"message": f"库位不存在: {site_id}"
}
# 检查库位是否已被锁定
if current_layer.is_locked:
# 如果是同一个锁定者,视为已锁定成功
if current_layer.locked_by == locked_id:
logger.info(f"库位 {site_id} 已被同一锁定者 {locked_id} 锁定")
return {
"success": True,
"message": success_msg
"message": f"库位已被同一锁定者锁定库位ID: {site_id}"
}
# 被其他锁定者锁定,需要重试
if attempts < max_attempts:
logger.info(f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试")
await asyncio.sleep(retry_sleep)
continue
else:
# 更新失败,可能在更新期间被其他进程锁定
if attempts < max_attempts:
logger.info(f"库位 {site_id} 锁定失败(可能被其他进程抢占),第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试")
await asyncio.sleep(retry_sleep)
continue
else:
return {
"success": False,
"message": f"库位 {site_id} 锁定失败,可能被其他进程抢占,重试 {retry_times} 次后仍然失败"
}
# 不应该到达这里
return {
"success": False,
"message": f"库位 {site_id} 锁定失败,重试 {retry_times} 次后仍然失败"
}
# 达到最大重试次数,返回失败
return {
"success": False,
"message": f"库位 {site_id} 已被其他锁定者 {current_layer.locked_by} 锁定,重试 {retry_times} 次后仍然失败"
}
# 库位未被锁定,尝试锁定(使用新会话)
async with get_async_session_read_committed() as lock_session:
lock_result = await self._attempt_lock_site(lock_session, site_id, locked_id, map_id)
if lock_result["success"]:
success_msg = f"{attempts}次尝试锁定库位成功库位ID: {site_id}" if attempts > 1 else f"锁定库位成功库位ID: {site_id}"
logger.info(success_msg)
return {
"success": True,
"message": success_msg
}
else:
# 更新失败,可能在更新期间被其他进程锁定
if attempts < max_attempts:
logger.info(f"库位 {site_id} 锁定失败(可能被其他进程抢占),第 {attempts} 次尝试,等待 {retry_sleep} 秒后重试")
await asyncio.sleep(retry_sleep)
continue
else:
return {
"success": False,
"message": f"库位 {site_id} 锁定失败,可能被其他进程抢占,重试 {retry_times} 次后仍然失败"
}
# 不应该到达这里
return {
"success": False,
"message": f"库位 {site_id} 锁定失败,重试 {retry_times} 次后仍然失败"
}
except Exception as e:
logger.error(f"锁定库位异常: {str(e)}")
@ -2887,11 +2892,13 @@ class SetSiteUnlockedBlockHandler(StorageBlockHandler):
async def _set_site_unlocked_in_db(self, site_id: str, un_locked_id: str, map_id:str, current_block_name: str) -> Dict[str, Any]:
"""
在数据库中解锁库位
使用READ COMMITTED隔离级别确保解锁操作立即对其他并发事务可见
避免其他任务读取到过期的锁定状态
使用READ COMMITTED隔离级别和强制刷新确保读取到最新的已提交数据
"""
try:
async with get_async_session_read_committed() as session:
# 强制刷新会话缓存,确保读取到其他事务已提交的最新数据
session.expire_all()
# 先检查库位是否存在
site_check_query = select(OperatePointLayer).where(
OperatePointLayer.layer_name == site_id,
@ -2931,7 +2938,8 @@ class SetSiteUnlockedBlockHandler(StorageBlockHandler):
update_stmt = update(OperatePointLayer).where(
OperatePointLayer.layer_name == site_id,
OperatePointLayer.scene_id == map_id,
OperatePointLayer.is_locked == True # 只有锁定的库位才能解锁
OperatePointLayer.is_locked == True,
OperatePointLayer.locked_by == un_locked_id # 确保只有正确的锁定者能解锁
).values(
is_locked=False,
locked_by=None, # 清空锁定者
@ -2943,7 +2951,6 @@ class SetSiteUnlockedBlockHandler(StorageBlockHandler):
# 检查是否成功更新
if result.rowcount > 0:
await session.commit()
success_msg = f"解锁库位成功库位ID: {site_id}块id{current_block_name}"
logger.info(success_msg)
return {