Add hyper-heuristic production line optimization algorithm

2024-05-17 22:52:49 +08:00
parent 6fa1f53f69
commit 7c9a900b95
13 changed files with 1731 additions and 1109 deletions


@@ -29,7 +29,7 @@ head_nozzle = ['' for _ in range(max_head_index)] # nozzles already assigned to each head
slotf1_pos, slotr1_pos = [-31.267, 44.], [807., 810.545] # positions of F1 (leftmost slot of the front base) and R1 (rightmost slot of the rear base)
fix_camera_pos = [269.531, 694.823] # fixed camera position
anc_marker_pos = [336.457, 626.230] # ANC fiducial mark position
stopper_pos = [635.150, 124.738] # stopper position
stopper_pos = [535.150, 124.738] # stopper position
# algorithm weight parameters
e_nz_change, e_gang_pick = 4, 0.6
@@ -48,6 +48,7 @@ nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN220': 6, 'CN400': 6, 'CN140': 6}
# time parameters
t_cycle = 0.3
t_anc = 0.6
t_pick, t_place = .078, .051 # pickup / placement times
t_nozzle_put, t_nozzle_pick = 0.9, 0.75 # time to put down / pick up a nozzle
t_nozzle_change = t_nozzle_put + t_nozzle_pick
@@ -59,66 +60,22 @@ T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0
class OptInfo:
def __init__(self):
self.placement_time = 0
self.total_time = .0 # total assembly time
self.total_points = .0 # total number of placement points
self.cycle_counter = 0
self.nozzle_change_counter = 0
self.pickup_counter = 0
self.pickup_time = .0 # movement time during the pickup process
self.round_time = .0 # movement time travelling between feeder base and board
self.place_time = .0 # movement time during the placement process
self.operation_time = .0 # time spent on mechanical actions such as pickup, placement and nozzle changes
self.pickup_movement = 0
self.placement_movement = 0
self.cycle_counter = 0 # number of cycles
self.nozzle_change_counter = 0 # number of nozzle changes
self.anc_round_counter = 0 # number of trips to the ANC
self.pickup_counter = 0 # number of pickups
def optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False):
if nozzle_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
nozzle_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
nozzle_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
if index == -1:
nozzle_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
nozzle_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index].nz
print(nozzle_assign)
print('')
if component_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
component_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
component_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
if index == -1:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index].part
print(component_assign)
print('')
if feeder_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
feedr_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(feeder_slot_result):
feedr_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
slot = feeder_slot_result[cycle][head]
if slot == -1:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'A'
else:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'F{}'.format(
slot) if slot <= max_slot_index // 2 else 'R{}'.format(slot - max_slot_index // 2)
print(feedr_assign)
print('')
self.total_distance = .0 # total travel distance
self.place_distance = .0 # placement travel distance
self.pickup_distance = .0 # pickup travel distance
def axis_moving_time(distance, axis=0):
@@ -172,8 +129,12 @@ def timer_wrapper(func):
def measure_time(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
print(f"function {func.__name__} running time : {time.time() - start_time:.3f} s")
# read the optional 'hinter' keyword; passing hinter=False suppresses the timing printout
hinter = kwargs.get('hinter', True)
if hinter:
print(f"function {func.__name__} running time : {time.time() - start_time:.3f} s")
return result
return measure_time
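# note: any @timer_wrapper-decorated function called with hinter=False (e.g.
# greedy_placement_route_generation(..., hinter=False)) runs without the timing printout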
@@ -440,7 +401,7 @@ def dynamic_programming_cycle_path(pcb_data, cycle_placement, assigned_feeder):
@timer_wrapper
def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result):
def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result, hinter=True):
placement_result, head_sequence_result = [], []
mount_point_index = [[] for _ in range(len(component_data))]
mount_point_pos = [[] for _ in range(len(component_data))]
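# per-component lists to be filled with mount point indices and coordinates taken from pcb_data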
@@ -951,7 +912,7 @@ def constraint_swap_mutation(component_points, individual, machine_number):
offspring = individual.copy()
idx, component_index = 0, random.randint(0, len(component_points) - 1)
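# pick one component type at random; when the loop below reaches it, two positions inside its
# segment of the individual are sampled for the swap (the swap itself follows outside this hunk)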
for _, points in component_points:
for points in component_points.values():
if component_index == 0:
while True:
index1, index2 = random.sample(range(points + machine_number - 2), 2)
@@ -988,6 +949,7 @@ def random_selective(data, possibility): # randomly select an element according to the given probabilities
possibility = [p / sum_val for p in possibility]
random_val = random.random()
idx = 0
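# roulette-wheel selection: walk the normalized probabilities and stop once the random draw is used up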
for idx, val in enumerate(possibility):
random_val -= val
if random_val <= 0:
@@ -1061,17 +1023,25 @@ def get_line_config_number(machine_number, component_number):
return div_counter
def partial_data_convert(pcb_data, component_data, machine_assign, machine_number):
assignment_result = copy.deepcopy(machine_assign)
def convert_line_assigment(pcb_data, component_data, assignment_result):
machine_number = len(assignment_result)
placement_points = []
partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for machine_index in range(machine_number):
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
partial_component_data[machine_index] = component_data.copy(deep=True)
placement_points.append(sum(assignment_result[machine_index]))
assert sum(placement_points) == len(pcb_data)
# === distribute each part's available feeders across machines in proportion to assigned points ===
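# every machine that mounts the part receives at least one feeder; leftover feeders are handed out one at a time below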
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(max_machine_index)]
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
for machine_index in range(machine_number):
partial_component_data[machine_index].loc[part_index, 'points'] = 0
for machine_index in range(machine_number):
if feeder_points[machine_index] == 0:
@@ -1079,7 +1049,7 @@ def partial_data_convert(pcb_data, component_data, machine_assign, machine_numbe
arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
partial_component_data[machine_index].loc[part_index]['feeder-limit'] = arg_feeder
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
feeder_limit -= arg_feeder
for machine_index in range(machine_number):
@@ -1088,27 +1058,126 @@ def partial_data_convert(pcb_data, component_data, machine_assign, machine_numbe
if feeder_points[machine_index] == 0:
continue
partial_component_data[machine_index].loc[part_index]['feeder-limit'] += 1
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
feeder_limit -= 1
for machine_index in range(machine_number):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index]['feeder-limit'] > 0
assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0
# === assign placements ===
component_machine_index = [0 for _ in range(len(component_data))]
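# part2idx maps each part name to its row index in component_data for quick lookups while iterating pcb_data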
part2idx = defaultdict(int)
for idx, data in component_data.iterrows():
part2idx[data.part] = idx
machine_average_pos = [[0, 0] for _ in range(machine_number)]
machine_step_counter = [0 for _ in range(machine_number)]
part_pcb_data = defaultdict(list)
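# group board placements by part index: parts mounted on a single machine are assigned directly in the
# next pass, while parts split across machines are deferred and assigned by distance to machine centroids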
for _, data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == 0:
component_machine_index[part_index] += 1
machine_index += 1
else:
break
assignment_result[machine_index][part_index] -= 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
part_pcb_data[part2idx[data.part]].append(data)
multiple_component_index = []
for part_index in range(len(component_data)):
machine_assign_set = []
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index]:
machine_assign_set.append(machine_index)
if len(machine_assign_set) == 1:
for data in part_pcb_data[part_index]:
machine_index = machine_assign_set[0]
machine_average_pos[machine_index][0] += data.x
machine_average_pos[machine_index][1] += data.y
machine_step_counter[machine_index] += 1
partial_component_data[machine_index].loc[part_index, 'points'] += 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
elif len(machine_assign_set) > 1:
multiple_component_index.append(part_index)
for machine_index in range(machine_number):
if machine_step_counter[machine_index] == 0:
continue
machine_average_pos[machine_index][0] /= machine_step_counter[machine_index]
machine_average_pos[machine_index][1] /= machine_step_counter[machine_index]
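# assign each remaining placement to the nearest machine that still has quota for its part,
# updating that machine's running centroid as points are added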
for part_index in multiple_component_index:
for data in part_pcb_data[part_index]:
idx = -1
min_dist = None
for machine_index in range(machine_number):
if partial_component_data[machine_index].loc[part_index, 'points'] >= \
assignment_result[machine_index][part_index]:
continue
dist = (data.x - machine_average_pos[machine_index][0]) ** 2 + (
data.y - machine_average_pos[machine_index][1]) ** 2
if min_dist is None or dist < min_dist:
min_dist, idx = dist, machine_index
assert idx >= 0
machine_step_counter[idx] += 1
# incremental (running-mean) update of the machine centroid
machine_average_pos[idx][0] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][0] + data.x / \
machine_step_counter[idx]
machine_average_pos[idx][1] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][1] + data.y / \
machine_step_counter[idx]
partial_component_data[idx].loc[part_index, 'points'] += 1
partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
# === adjust the number of available feeders separately for each machine's standalone optimization ===
# for machine_index, data in partial_pcb_data.items():
# part_info = [] # part info list(part index, part points, available feeder-num, upper feeder-num)
# for part_index, cp_data in partial_component_data[machine_index].iterrows():
# if assignment_result[machine_index][part_index]:
# part_info.append(
# [part_index, assignment_result[machine_index][part_index], 1, cp_data['feeder-limit']])
#
# part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
# start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
# while start_index < len(part_info):
# assign_part_point, assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# assign_part_index.append(idx_)
#
# variance = np.std(assign_part_point)
# while start_index <= end_index:
# part_info_index = assign_part_index[np.argmax(assign_part_point)]
#
# if part_info[part_info_index][2] < part_info[part_info_index][3]: # bounded by the upper limit on the number of feeders
# part_info[part_info_index][2] += 1
# end_index -= 1
#
# new_assign_part_point, new_assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# new_assign_part_index.append(idx_)
#
# new_variance = np.std(new_assign_part_point)
# if variance < new_variance:
# part_info[part_info_index][2] -= 1
# end_index += 1
# break
#
# variance = new_variance
# assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
# else:
# break
#
# start_index = end_index + 1
# end_index = min(start_index + max_head_index - 1, len(part_info) - 1)
#
# max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
# for info in part_info:
# partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)
for machine_index in range(machine_number):
partial_component_data[machine_index] = partial_component_data[machine_index][
partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)
return partial_pcb_data, partial_component_data
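# usage sketch (assumed calling context): assignment_result is a machine-by-part matrix of placement point counts, e.g.
# partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)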