Whole-line optimization: project for the first finalized version of the paper

Added batch testing for the whole production line
Modified the existing min-max model path
Reworked the overall framework of the genetic algorithm
Added outlier-data filtering to the estimators
Encapsulated the optimization results in a dedicated class
Revised the duplicate nozzle-group check in the feeder scanning algorithm
2024-06-26 09:44:08 +08:00
parent cbeba48da0
commit 37f4e5b02c
14 changed files with 749 additions and 669 deletions
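The central change in this commit is the OptResult wrapper, which replaces the five loose result lists (component, cycle, feeder slot, placement, head sequence) that were previously threaded through every call. A minimal usage sketch, assuming the names defined in the diffs below; the five result lists stand for the output of a single-machine optimizer run:

opt_res = OptResult(component_result, cycle_result, feeder_slot_result,
                    placement_result, head_sequence)
info = placement_info_evaluation(component_data, pcb_data, opt_res, hinter=False)  # returns an OptInfo
info.print()           # human-readable timing breakdown
score = info.metric()  # weighted estimate using the fitted coefficients Fit_cy, Fit_nz, Fit_pu, Fit_pl, Fit_mv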

View File

@ -11,6 +11,7 @@ import math
import random
import copy
import torch
import torch.nn
import argparse
import joblib
import pickle
@ -20,6 +21,7 @@ import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import traceback
matplotlib.use('TkAgg')
@ -64,6 +66,18 @@ t_fix_camera_check = 0.12 # fixed-camera inspection time
# time parameters (whole-line related)
T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0
# time parameters (obtained from data fitting)
Fit_cy, Fit_nz, Fit_pu, Fit_pl, Fit_mv = 0.326, 0.8694, 0.159, 0.041, 0.001
class OptResult:
def __init__(self, cp_assign=None, cycle_assign=None, slot_assign=None, place_assign=None, sequence_assign=None):
self.component_assign = [] if cp_assign is None else cp_assign
self.cycle_assign = [] if cycle_assign is None else cycle_assign
self.feeder_slot_assign = [] if slot_assign is None else slot_assign
self.placement_assign = [] if place_assign is None else place_assign
self.head_sequence = [] if sequence_assign is None else sequence_assign
class OptInfo:
def __init__(self):
@ -93,6 +107,22 @@ class OptInfo:
print(f'-Pick operation counter: {self.pickup_counter: d}')
print(f'-Pick time: {self.pickup_time: .3f}, Pick distance: {self.pickup_distance: .3f}')
print(f'-Place time: {self.place_time: .3f}, Place distance: {self.place_distance: .3f}')
print(
f'-Round time: {self.total_time - self.pickup_time - self.place_time: .3f}, Round distance: '
f'{self.total_distance - self.pickup_distance - self.place_distance: .3f}')
minutes, seconds = int(self.total_time // 60), int(self.total_time) % 60
millisecond = int((self.total_time - minutes * 60 - seconds) * 1000)
print(f'-Operation time: {self.operation_time: .3f}, ', end='')
if minutes > 0:
print(f'Total time: {minutes: d} min {seconds} s {millisecond: 2d} ms ({self.total_time: .3f}s)')
else:
print(f'Total time: {seconds} s {millisecond :2d} ms ({self.total_time :.3f}s)')
def metric(self):
return Fit_cy * self.cycle_counter + Fit_nz * self.nozzle_change_counter + Fit_pu * self.pickup_counter + \
Fit_pl * self.total_points + Fit_mv * self.pickup_distance
def axis_moving_time(distance, axis=0):
@ -438,6 +468,8 @@ def greedy_placement_route_generation(component_data, pcb_data, component_result
for cycle_set in range(len(component_result)):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
if sum(component_result[cycle_set]) == -max_head_index:
continue
# search_dir = 1 - search_dir
assigned_placement = [-1] * max_head_index
max_pos = [max(mount_point_pos[component_index], key=lambda x: x[0]) for component_index in
@ -936,7 +968,7 @@ def constraint_swap_mutation(component_points, individual, machine_number):
for points in component_points.values():
if component_index == 0:
while True:
index1, index2 = random.sample(range(points + machine_number - 2), 2)
index1, index2 = random.sample(range(points + machine_number - 1), 2)
if offspring[idx + index1] != offspring[idx + index2]:
break
@ -1050,11 +1082,13 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
placement_points = []
partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for machine_index in range(machine_number):
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
if pcb_data is not None:
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
partial_component_data[machine_index] = component_data.copy(deep=True)
placement_points.append(sum(assignment_result[machine_index]))
assert sum(placement_points) == len(pcb_data)
if pcb_data is not None:
assert sum(placement_points) == len(pcb_data)
# === evenly assign the available feeders ===
for part_index, data in component_data.iterrows():
@ -1062,7 +1096,11 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
for machine_index in range(machine_number):
partial_component_data[machine_index].loc[part_index, 'points'] = 0
if pcb_data is None:
partial_component_data[machine_index].loc[part_index, 'points'] = assignment_result[machine_index][
part_index]
else:
partial_component_data[machine_index].loc[part_index, 'points'] = 0
for machine_index in range(machine_number):
if feeder_points[machine_index] == 0:
@ -1084,80 +1122,81 @@ def convert_line_assigment(pcb_data, component_data, assignment_result):
partial_component_data[machine_index].loc[part_index].fdn > feeder_points[
assign_machine] / partial_component_data[assign_machine].loc[part_index].fdn:
assign_machine = machine_index
partial_component_data[assign_machine].loc[part_index, 'fdn'] += 1
feeder_limit -= 1
assert assign_machine is not None
partial_component_data[assign_machine].loc[part_index, 'fdn'] += 1
feeder_limit -= 1
for machine_index in range(machine_number):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index].fdn > 0
# === assign placements ===
part2idx = defaultdict(int)
for idx, data in component_data.iterrows():
part2idx[data.part] = idx
if pcb_data is not None:
part2idx = defaultdict(int)
for idx, data in component_data.iterrows():
part2idx[data.part] = idx
machine_average_pos = [[0, 0] for _ in range(machine_number)]
machine_step_counter = [0 for _ in range(machine_number)]
part_pcb_data = defaultdict(list)
for _, data in pcb_data.iterrows():
part_pcb_data[part2idx[data.part]].append(data)
machine_average_pos = [[0, 0] for _ in range(machine_number)]
machine_step_counter = [0 for _ in range(machine_number)]
part_pcb_data = defaultdict(list)
for _, data in pcb_data.iterrows():
part_pcb_data[part2idx[data.part]].append(data)
multiple_component_index = []
for part_index in range(len(component_data)):
machine_assign_set = []
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index]:
machine_assign_set.append(machine_index)
if len(machine_assign_set) == 1:
for data in part_pcb_data[part_index]:
machine_index = machine_assign_set[0]
machine_average_pos[machine_index][0] += data.x
machine_average_pos[machine_index][1] += data.y
machine_step_counter[machine_index] += 1
partial_component_data[machine_index].loc[part_index, 'points'] += 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
elif len(machine_assign_set) > 1:
multiple_component_index.append(part_index)
for machine_index in range(machine_number):
if machine_step_counter[machine_index] == 0:
continue
machine_average_pos[machine_index][0] /= machine_step_counter[machine_index]
machine_average_pos[machine_index][1] /= machine_step_counter[machine_index]
for part_index in multiple_component_index:
for data in part_pcb_data[part_index]:
idx = -1
min_dist = None
multiple_component_index = []
for part_index in range(len(component_data)):
machine_assign_set = []
for machine_index in range(machine_number):
if partial_component_data[machine_index].loc[part_index, 'points'] >= \
assignment_result[machine_index][part_index]:
continue
dist = (data.x - machine_average_pos[machine_index][0]) ** 2 + (
data.y - machine_average_pos[machine_index][1]) ** 2
if min_dist is None or dist < min_dist:
min_dist, idx = dist, machine_index
if assignment_result[machine_index][part_index]:
machine_assign_set.append(machine_index)
assert idx >= 0
machine_step_counter[idx] += 1
machine_average_pos[idx][0] += (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][0] + data.x / \
machine_step_counter[idx]
machine_average_pos[idx][1] += (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][1] + data.y / \
machine_step_counter[idx]
if len(machine_assign_set) == 1:
for data in part_pcb_data[part_index]:
machine_index = machine_assign_set[0]
partial_component_data[idx].loc[part_index, 'points'] += 1
partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
machine_average_pos[machine_index][0] += data.x
machine_average_pos[machine_index][1] += data.y
for machine_index in range(machine_number):
partial_component_data[machine_index] = partial_component_data[machine_index][
partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)
machine_step_counter[machine_index] += 1
partial_component_data[machine_index].loc[part_index, 'points'] += 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
elif len(machine_assign_set) > 1:
multiple_component_index.append(part_index)
for machine_index in range(machine_number):
if machine_step_counter[machine_index] == 0:
continue
machine_average_pos[machine_index][0] /= machine_step_counter[machine_index]
machine_average_pos[machine_index][1] /= machine_step_counter[machine_index]
for part_index in multiple_component_index:
for data in part_pcb_data[part_index]:
idx = -1
min_dist = None
for machine_index in range(machine_number):
if partial_component_data[machine_index].loc[part_index, 'points'] >= \
assignment_result[machine_index][part_index]:
continue
dist = (data.x - machine_average_pos[machine_index][0]) ** 2 + (
data.y - machine_average_pos[machine_index][1]) ** 2
if min_dist is None or dist < min_dist:
min_dist, idx = dist, machine_index
assert idx >= 0
machine_step_counter[idx] += 1
machine_average_pos[idx][0] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][0] \
+ data.x / machine_step_counter[idx]
machine_average_pos[idx][1] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][1] \
+ data.y / machine_step_counter[idx]
partial_component_data[idx].loc[part_index, 'points'] += 1
partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
for machine_index in range(machine_number):
partial_component_data[machine_index] = partial_component_data[machine_index][
partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)
return partial_pcb_data, partial_component_data
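With the pcb_data is None branch added above, convert_line_assigment can now be driven from the component table alone, which is how the estimator-side converter (see the estimator diff further below) uses it. A hedged sketch of the two call modes, assuming pcb_data, component_data and assignment_result are already loaded:

# Full mode: split both the PCB rows and the component table across the machines.
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)

# Estimator mode (new in this commit): no PCB rows are available, so the per-machine
# point counts are taken directly from assignment_result.
_, partial_component_data = convert_line_assigment(None, component_data, assignment_result)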

View File

@ -10,33 +10,33 @@ from base_optimizer.smopt_mathmodel import *
from base_optimizer.result_analysis import *
def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, method='', hinter=False):
def base_optimizer(machine_index, pcb_data, component_data, feeder_data, params, hinter=False):
if method == 'cell-division': # genetic algorithm based on cell division
if params.machine_optimizer == 'cell-division': # genetic algorithm based on cell division
component_result, cycle_result, feeder_slot_result = optimizer_celldivision(pcb_data, component_data)
placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
cycle_result, feeder_slot_result)
elif method == 'feeder-scan': # feeder-priority algorithm based on feeder-base scanning
elif params.machine_optimizer == 'feeder-scan': # feeder-priority algorithm based on feeder-base scanning
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data)
placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
cycle_result, feeder_slot_result)
# placement_result, head_sequence = beam_search_for_route_generation(component_data, pcb_data, component_result,
# cycle_result, feeder_slot_result)
elif method == 'hybrid-genetic': # hybrid genetic algorithm based on pickup groups
elif params.machine_optimizer == 'hybrid-genetic': # hybrid genetic algorithm based on pickup groups
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_hybrid_genetic(
pcb_data, component_data, hinter=hinter)
elif method == 'aggregation': # batch-level integer programming + heuristic algorithm
elif params.machine_optimizer == 'aggregation': # batch-level integer programming + heuristic algorithm
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_aggregation(
component_data, pcb_data)
elif method == 'genetic-scanning':
elif params.machine_optimizer == 'genetic-scanning':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_genetic_scanning(
component_data, pcb_data, hinter=hinter)
elif method == 'mip-model':
elif params.machine_optimizer == 'mip-model':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_mathmodel(
component_data, pcb_data, hinter=hinter)
elif method == "two-phase":
elif params.machine_optimizer == "two-phase":
component_result, feeder_slot_result, cycle_result = gurobi_optimizer(pcb_data, component_data, feeder_data,
initial=True, partition=True,
reduction=True, hinter=hinter)
@ -44,17 +44,21 @@ def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, me
placement_result, head_sequence = scan_based_placement_route_generation(component_data, pcb_data,
component_result, cycle_result)
else:
raise 'machine optimizer method ' + method + ' is not existed'
raise ValueError('machine optimizer method ' + params.machine_optimizer + ' does not exist')
print('----- Placement machine ' + str(machine_index) + ' ----- ')
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence)
# estimate the placement time
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence, hinter=False)
info = placement_info_evaluation(component_data, pcb_data, opt_res, hinter=False)
if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
optimization_assign_result(component_data, pcb_data, opt_res, nozzle_hinter=True, component_hinter=True,
feeder_hinter=True)
info.print()
print('------------------------------ ')
print('------------------------------ ')
if params.save:
output_optimize_result(
f'result/{params.filename[:-4]}-{params.line_optimizer}-M0{machine_index} {params.save_suffix}',
component_data, pcb_data, opt_res)
return info
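base_optimizer now receives the argparse namespace instead of a bare method string, so the optimizer choice and the result-saving options travel together. A minimal driving sketch; machine_optimizer and save mirror the argparse options in the estimator's __main__ block, while filename, line_optimizer and save_suffix are assumed attributes that only appear in the output path above:

import argparse
import pandas as pd

parser = argparse.ArgumentParser()
parser.add_argument('--machine_optimizer', default='feeder-scan', type=str)
parser.add_argument('--save', default=False, type=bool)
params = parser.parse_args([])
# hypothetical values for the attributes that are only read when params.save is set
params.filename, params.line_optimizer, params.save_suffix = 'demo.txt', 'line-opt', 'v1'

# pcb_data and component_data are assumed to come from load_data(...)
feeder_data = pd.DataFrame(columns=['slot', 'part', 'arg'])
info = base_optimizer(1, pcb_data, component_data, feeder_data, params, hinter=True)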

View File

@ -2,8 +2,7 @@ from base_optimizer.optimizer_common import *
def convert_pcbdata_to_result(pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = [], [], []
placement_result, head_sequence_result = [], []
opt_res = OptResult()
assigned_part = [-1 for _ in range(max_head_index)]
assigned_slot = [-1 for _ in range(max_head_index)]
@ -14,19 +13,19 @@ def convert_pcbdata_to_result(pcb_data, component_data):
for point_cnt in range(point_num + 1):
cycle_start = 1 if point_cnt == point_num else pcb_data.loc[point_cnt, 'cs']
if (cycle_start and point_cnt != 0) or -1 not in assigned_part:
if (cycle_start and point_cnt != 0) or -1 not in assigned_part:
if len(component_result) != 0 and component_result[-1] == assigned_part:
cycle_result[-1] += 1
if len(opt_res.component_assign) != 0 and opt_res.component_assign[-1] == assigned_part:
opt_res.cycle_assign[-1] += 1
else:
component_result.append(assigned_part)
feeder_slot_result.append(assigned_slot)
cycle_result.append(1)
opt_res.component_assign.append(assigned_part)
opt_res.feeder_slot_assign.append(assigned_slot)
opt_res.cycle_assign.append(1)
# assigned_sequence = list(reversed(assigned_sequence)) # Samsung pickup order is reversed
placement_result.append(assigned_point)
head_sequence_result.append(assigned_sequence)
opt_res.placement_assign.append(assigned_point)
opt_res.head_sequence.append(assigned_sequence)
assigned_part = [-1 for _ in range(max_head_index)]
assigned_slot = [-1 for _ in range(max_head_index)]
@ -50,36 +49,35 @@ def convert_pcbdata_to_result(pcb_data, component_data):
assigned_point[head] = point_cnt
assigned_sequence.append(head)
return component_result, cycle_result, feeder_slot_result, placement_result, head_sequence_result
return opt_res
# plot the feeder positions from which components are picked in each cycle
def pickup_cycle_schematic(feeder_slot_result, cycle_result):
def pickup_cycle_schematic(optimizer_result):
plt.rcParams['font.sans-serif'] = ['KaiTi'] # set the default font
plt.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures
# data
bar_width = .7
feeder_part = np.zeros(int(max_slot_index / 2), dtype=np.int)
for cycle in range(len(feeder_slot_result)):
for cycle in range(len(optimizer_result.feeder_slot_assign)):
label_str = '周期' + str(cycle + 1)
cur_feeder_part = np.zeros(int(max_slot_index / 2), dtype=np.int)
for slot in feeder_slot_result[cycle]:
for slot in optimizer_result.feeder_slot_assign[cycle]:
if slot > 0:
cur_feeder_part[slot] += cycle_result[cycle]
cur_feeder_part[slot] += optimizer_result.cycle_assign[cycle]
plt.bar(np.arange(max_slot_index / 2), cur_feeder_part, bar_width, edgecolor='black', bottom=feeder_part,
label=label_str)
for slot in feeder_slot_result[cycle]:
for slot in optimizer_result.feeder_slot_assign[cycle]:
if slot > 0:
feeder_part[slot] += cycle_result[cycle]
feeder_part[slot] += optimizer_result.cycle_assign[cycle]
plt.legend()
plt.show()
def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_slot_result, placement_result,
head_sequence, cycle=-1):
def placement_route_schematic(pcb_data, optimizer_result, cycle=-1):
plt.figure('cycle {}'.format(cycle + 1))
pos_x, pos_y = [], []
@ -89,8 +87,8 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
# plt.text(pcb_data.loc[i]['x'], pcb_data.loc[i]['y'] + 0.1, '%d' % i, ha='center', va = 'bottom', size = 8)
mount_pos = []
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
plt.text(pos_x[index], pos_y[index] + 0.1, 'HD%d' % (head + 1), ha='center', va='bottom', size=10)
plt.plot([pos_x[index], pos_x[index] - head * head_interval], [pos_y[index], pos_y[index]], linestyle='-.',
color='black', linewidth=1)
@ -105,9 +103,9 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
linewidth=1)
draw_x, draw_y = [], []
for c in range(cycle, len(placement_result)):
for c in range(cycle, len(optimizer_result.placement_assign)):
for h in range(max_head_index):
i = placement_result[c][h]
i = optimizer_result.placement_assign[c][h]
if i == -1:
continue
draw_x.append(pcb_data.loc[i]['x'] + stopper_pos[0])
@ -124,18 +122,18 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
feeder_part, feeder_counter = {}, {}
placement_cycle = 0
for cycle_, components in enumerate(component_result):
for cycle_, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
placement = placement_result[placement_cycle][head]
slot = feeder_slot_result[cycle_][head]
placement = optimizer_result.placement_assign[placement_cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle_][head]
feeder_part[slot] = pcb_data.loc[placement]['part']
if slot not in feeder_counter.keys():
feeder_counter[slot] = 0
feeder_counter[slot] += cycle_result[cycle_]
placement_cycle += cycle_result[cycle_]
feeder_counter[slot] += optimizer_result.cycle_assign[cycle_]
placement_cycle += optimizer_result.cycle_assign[cycle_]
for slot, part in feeder_part.items():
plt.text(slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1] + 15,
@ -153,9 +151,9 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
# plot the pickup path
pick_slot = []
cycle_group = 0
while sum(cycle_result[0: cycle_group + 1]) < cycle:
while sum(optimizer_result.cycle_assign[0: cycle_group + 1]) < cycle:
cycle_group += 1
for head, slot in enumerate(feeder_slot_result[cycle_group]):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
pick_slot.append(slot - head * interval_ratio)
@ -164,10 +162,10 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
next_cycle_group = 0
next_pick_slot = max_slot_index
while sum(cycle_result[0: next_cycle_group + 1]) < cycle + 1:
while sum(optimizer_result.cycle_assign[0: next_cycle_group + 1]) < cycle + 1:
next_cycle_group += 1
if next_cycle_group < len(feeder_slot_result):
for head, slot in enumerate(feeder_slot_result[cycle_group]):
if next_cycle_group < len(optimizer_result.feeder_slot_assign):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
next_pick_slot = min(next_pick_slot, slot - head * interval_ratio)
@ -185,8 +183,7 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
plt.show()
def save_placement_route_figure(file_name, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence):
def save_placement_route_figure(file_name, pcb_data, optimizer_result):
path = 'result/' + file_name[:file_name.find('.')]
if not os.path.exists(path):
os.mkdir(path)
@ -199,12 +196,12 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
with tqdm(total=100) as pbar:
pbar.set_description('save figure')
for cycle in range(len(placement_result)):
for cycle in range(len(optimizer_result.placement_assign)):
plt.figure(cycle)
mount_pos = []
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
plt.text(pos_x[index], pos_y[index] + 0.1, 'HD%d' % (head + 1), ha='center', va='bottom', size=10)
plt.plot([pos_x[index], pos_x[index] - head * head_interval], [pos_y[index], pos_y[index]],
linestyle='-.', color='black', linewidth=1)
@ -217,9 +214,9 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
linewidth=1)
draw_x, draw_y = [], []
for c in range(cycle, len(placement_result)):
for c in range(cycle, len(optimizer_result.placement_assign)):
for h in range(max_head_index):
i = placement_result[c][h]
i = optimizer_result.placement_assign[c][h]
if i == -1:
continue
draw_x.append(pcb_data.loc[i]['x'] + stopper_pos[0])
@ -235,18 +232,18 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
feeder_part, feeder_counter = {}, {}
placement_cycle = 0
for cycle_, components in enumerate(component_result):
for cycle_, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
placement = placement_result[placement_cycle][head]
slot = feeder_slot_result[cycle_][head]
placement = optimizer_result.placement_assign[placement_cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle_][head]
feeder_part[slot] = pcb_data.loc[placement]['part']
if slot not in feeder_counter.keys():
feeder_counter[slot] = 0
feeder_counter[slot] += cycle_result[cycle_]
placement_cycle += cycle_result[cycle_]
feeder_counter[slot] += optimizer_result.cycle_assign[cycle_]
placement_cycle += optimizer_result.cycle_assign[cycle_]
for slot, part in feeder_part.items():
plt.text(slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1] + 15,
@ -266,9 +263,9 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
# plot the pickup path
pick_slot = []
cycle_group = 0
while sum(cycle_result[0: cycle_group + 1]) < cycle:
while sum(optimizer_result.cycle_assign[0: cycle_group + 1]) < cycle:
cycle_group += 1
for head, slot in enumerate(feeder_slot_result[cycle_group]):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
pick_slot.append(slot - head * interval_ratio)
@ -286,46 +283,31 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
plt.savefig(path + '/cycle_{}'.format(cycle + 1))
plt.close(cycle)
pbar.update(100 / len(placement_result))
pbar.update(100 / len(optimizer_result.placement_assign))
def output_optimize_result(file_name, method, component_data, pcb_data, feeder_data, component_result, cycle_result,
feeder_slot_result, placement_result, head_sequence):
assert len(component_result) == len(feeder_slot_result)
if feeder_data is None:
warning_info = 'file: ' + file_name + ' optimize result is not existed!'
warnings.warn(warning_info, UserWarning)
return
def output_optimize_result(file_path, component_data, pcb_data, optimizer_result):
assert len(optimizer_result.component_assign) == len(optimizer_result.feeder_slot_assign)
output_data = pcb_data.copy(deep=True)
# default ANC parameters
anc_list = defaultdict(list)
anc_list['CN065'] = list(range(14, 25, 2))
anc_list['CN220'] = list(range(15, 26, 2))
anc_list['CN020'] = list(range(15, 26, 2))
anc_list['CN140'] = list(range(26, 37, 2))
anc_list['CN400'] = list(range(27, 38, 2))
# update the feeder group parameters
for cycle_set in range(len(cycle_result)):
for head, component in enumerate(component_result[cycle_set]):
if component == -1:
continue
if feeder_data[feeder_data['slot'] == feeder_slot_result[cycle_set][head]].index.empty:
part = component_data.loc[component]['part']
feeder_data.loc[len(feeder_data.index)] = [feeder_slot_result[cycle_set][head], part, 0]
feeder_data.sort_values('slot', inplace=True, ascending=True, ignore_index=True)
anc_list['CN040'] = list(range(27, 38, 2))
placement_index = []
assigned_nozzle, assigned_anc_hole = ['' for _ in range(max_head_index)], [-1 for _ in range(max_head_index)]
for cycle_set in range(len(cycle_result)):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle_set in range(len(optimizer_result.cycle_assign)):
floor_cycle, ceil_cycle = sum(optimizer_result.cycle_assign[:cycle_set]), sum(optimizer_result.cycle_assign[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
cycle_start = True
cycle_nozzle = ['' for _ in range(max_head_index)]
head_indexes = [-1 for _ in range(max_head_index)]
for head in head_sequence[cycle]:
index_ = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index_ = optimizer_result.placement_assign[cycle][head]
if index_ == -1:
continue
head_indexes[head] = index_
@ -338,14 +320,14 @@ def output_optimize_result(file_name, method, component_data, pcb_data, feeder_d
cycle_start = False
# feeder information
slot = feeder_slot_result[cycle_set][head]
slot = optimizer_result.feeder_slot_assign[cycle_set][head]
fdr = 'F' + str(slot) if slot < max_slot_index // 2 else 'R' + str(slot - max_slot_index // 2)
feeder_index = feeder_data[feeder_data['slot'] == slot].index.tolist()[0]
output_data.loc[index_, 'fdr'] = fdr + ' ' + feeder_data.loc[feeder_index, 'part']
output_data.loc[index_, 'fdr'] = fdr + ' ' + component_data.loc[
optimizer_result.component_assign[cycle_set][head], 'part']
# ANC information
cycle_nozzle[head] = component_data.loc[component_result[cycle_set][head], 'nz']
cycle_nozzle[head] = component_data.loc[optimizer_result.component_assign[cycle_set][head], 'nz']
for head in range(max_head_index):
nozzle = cycle_nozzle[head]
@ -373,26 +355,25 @@ def output_optimize_result(file_name, method, component_data, pcb_data, feeder_d
if 'desc' not in output_data.columns:
column_index = int(np.where(output_data.columns.values.reshape(-1) == 'part')[0][0])
output_data.insert(loc=column_index + 1, column='desc', value='')
file_dir = file_path[:file_path.rfind('/') + 1]
if not os.path.exists(file_dir):
os.makedirs(file_dir)
if not os.path.exists('result/' + method):
os.makedirs('result/' + method)
file_name = method + '/' + file_name.split('.')[0] + '.xlsx'
output_data.to_excel('result/' + file_name, sheet_name='tb1', float_format='%.3f', na_rep='')
output_data.to_excel(file_path + '.xlsx', sheet_name='tb1', float_format='%.3f', na_rep='')
def optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False):
def optimization_assign_result(component_data, pcb_data, optimizer_result, nozzle_hinter=False, component_hinter=False,
feeder_hinter=False):
if nozzle_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
nozzle_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
for cycle, components in enumerate(optimizer_result.component_assign):
nozzle_assign_row = len(nozzle_assign)
nozzle_assign.loc[nozzle_assign_row, 'cycle'] = cycle_result[cycle]
nozzle_assign.loc[nozzle_assign_row, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
index = optimizer_result.component_assign[cycle][head]
if index == -1:
nozzle_assign.loc[nozzle_assign_row, 'H{}'.format(head + 1)] = ''
else:
@ -414,15 +395,14 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
component_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
component_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for cycle, components in enumerate(optimizer_result.component_assign):
component_assign.loc[cycle, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
index = optimizer_result.component_assign[cycle][head]
if index == -1:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
part = component_data.loc[index]['part']
component_assign.loc[cycle, 'H{}'.format(head + 1)] = part
component_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index]['part']
# component_assign.loc[cycle, 'H{}'.format(head + 1)] = 'C' + str(index)
print(component_assign)
@ -432,41 +412,43 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
feedr_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(feeder_slot_result):
feedr_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for cycle, components in enumerate(optimizer_result.feeder_slot_assign):
feedr_assign.loc[cycle, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
slot = feeder_slot_result[cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle][head]
if slot == -1:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'A'
else:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'F{}'.format(
slot) if slot <= max_slot_index // 2 else 'R{}'.format(slot - max_head_index)
try:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'F{}'.format(
slot) if slot <= max_slot_index // 2 else 'R{}'.format(slot - max_head_index)
except:
print('')
print(feedr_assign)
print('')
def placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result=None, head_sequence=None, hinter=False):
def placement_info_evaluation(component_data, pcb_data, optimizer_result, hinter=False):
# === optimization result parameters ===
info = OptInfo()
# === validation ===
info.total_points = 0
for cycle, components in enumerate(component_result):
for cycle, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
info.total_points += cycle_result[cycle]
info.total_points += optimizer_result.cycle_assign[cycle]
if info.total_points != len(pcb_data):
warning_info = 'the number of placement points does not match the PCB data. '
warnings.warn(warning_info, UserWarning)
return OptInfo()
if placement_result:
if optimizer_result.placement_assign:
total_points = info.total_points
for placements in placement_result:
for placements in optimizer_result.placement_assign:
for placement in placements:
if placement == -1:
continue
@ -479,11 +461,11 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
return OptInfo()
feeder_arrangement = defaultdict(set)
for cycle, feeder_slots in enumerate(feeder_slot_result):
for cycle, feeder_slots in enumerate(optimizer_result.feeder_slot_assign):
for head, slot in enumerate(feeder_slots):
if slot == -1:
continue
feeder_arrangement[component_result[cycle][head]].add(slot)
feeder_arrangement[optimizer_result.component_assign[cycle][head]].add(slot)
info.total_components = len(feeder_arrangement.keys())
for part, data in component_data.iterrows():
@ -497,24 +479,26 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
# initialize the nozzle assignment info of the first cycle
nozzle_assigned = ['Empty' for _ in range(max_head_index)]
for head in range(max_head_index):
for cycle in range(len(component_result)):
idx = component_result[cycle][head]
for cycle in range(len(optimizer_result.component_assign)):
idx = optimizer_result.component_assign[cycle][head]
if idx == -1:
continue
else:
nozzle_assigned[head] = component_data.loc[idx]['nz']
for cycle_set, _ in enumerate(component_result):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle_set, _ in enumerate(optimizer_result.component_assign):
floor_cycle, ceil_cycle = sum(optimizer_result.cycle_assign[:cycle_set]), sum(optimizer_result.cycle_assign[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
if sum(optimizer_result.component_assign[cycle_set]) == -max_head_index:
continue
pick_slot, mount_pos, mount_angle = [], [], []
nozzle_pick_counter, nozzle_put_counter = 0, 0 # nozzle change counters (pick-up and put-back each counted once)
for head in range(max_head_index):
if feeder_slot_result[cycle_set][head] != -1:
pick_slot.append(feeder_slot_result[cycle_set][head] - interval_ratio * head)
if component_result[cycle_set][head] == -1:
if optimizer_result.feeder_slot_assign[cycle_set][head] != -1:
pick_slot.append(optimizer_result.feeder_slot_assign[cycle_set][head] - interval_ratio * head)
if optimizer_result.component_assign[cycle_set][head] == -1:
continue
nozzle = component_data.loc[component_result[cycle_set][head]]['nz']
nozzle = component_data.loc[optimizer_result.component_assign[cycle_set][head]]['nz']
if nozzle != nozzle_assigned[head]:
if nozzle_assigned[head] != 'Empty':
nozzle_put_counter += 1
@ -557,9 +541,9 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
# fixed-camera inspection
# for head in range(max_head_index):
# if component_result[cycle_set][head] == -1:
# if optimizer_result.component_assign[cycle_set][head] == -1:
# continue
# camera = component_data.loc[component_result[cycle_set][head]]['camera']
# camera = component_data.loc[optimizer_result.component_assign[cycle_set][head]]['camera']
# if camera == '固定相机':
# next_pos = [fix_camera_pos[0] - head * head_interval, fix_camera_pos[1]]
# move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
@ -571,9 +555,9 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
# cur_pos = next_pos
# placement path
if placement_result and head_sequence:
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
if optimizer_result.placement_assign and optimizer_result.head_sequence:
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
if index == -1:
continue
mount_pos.append([pcb_data.iloc[index]['x'] - head * head_interval + stopper_pos[0],
@ -602,31 +586,11 @@ def placement_info_evaluation(component_data, pcb_data, component_result, cycle_
info.nozzle_change_counter += nozzle_put_counter + nozzle_pick_counter
info.total_time = info.pickup_time + info.round_time + info.place_time + info.operation_time
minutes, seconds = int(info.total_time // 60), int(info.total_time) % 60
millisecond = int((info.total_time - minutes * 60 - seconds) * 60)
info.cycle_counter = sum(cycle_result)
info.cycle_counter = sum(optimizer_result.cycle_assign)
if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False)
print('-Cycle counter: {}'.format(info.cycle_counter))
print('-Nozzle change counter: {}'.format(info.nozzle_change_counter // 2))
print('-Pick operation counter: {}'.format(info.pickup_counter))
print('-Expected mounting tour length: {} mm'.format(info.place_distance))
print('-Expected picking tour length: {} mm'.format(info.pickup_distance))
print('-Expected total tour length: {} mm'.format(info.total_distance))
print('-Expected total moving time: {} s with pick: {}, round: {}, place = {}'.format(
info.pickup_time + info.round_time + info.place_time, info.pickup_time, info.round_time,
info.place_time))
print('-Expected total operation time: {} s'.format(info.operation_time))
if minutes > 0:
print('-Mounting time estimation: {:d} min {} s {:2d} ms ({:.3f}s)'.format(minutes, seconds, millisecond,
info.total_time))
else:
print('-Mounting time estimation: {} s {:2d} ms ({:.3f}s)'.format(seconds, millisecond, info.total_time))
optimization_assign_result(component_data, pcb_data, optimizer_result, nozzle_hinter=False,
component_hinter=False, feeder_hinter=False)
info.print()
return info

View File

@ -1,4 +1,5 @@
import math
from functools import reduce
from base_optimizer.optimizer_common import *
from base_optimizer.result_analysis import placement_info_evaluation
@ -17,9 +18,8 @@ def feeder_priority_assignment(component_data, pcb_data, hinter=True):
feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figure=False)
# step 3: scan the feeder base to determine the component pickup order
component_assign, cycle_assign, feeder_slot_assign = feeder_base_scan(component_data, pcb_data, feeder_data)
info = placement_info_evaluation(component_data, pcb_data, component_assign, cycle_assign,
feeder_slot_assign, None, None, hinter=False)
info = placement_info_evaluation(component_data, pcb_data, OptResult(component_assign, cycle_assign,
feeder_slot_assign), hinter=False)
val = 0.356 * info.cycle_counter + 0.949 * info.nozzle_change_counter + 0.159 * info.pickup_counter \
+ 0.002 * info.pickup_distance
@ -66,7 +66,11 @@ def feeder_nozzle_pattern(component_data):
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
num_permu = reduce(lambda x, y: x * y, range(1, len(nozzle_indices.keys()) + 1))
num_permu = num_permu // 2 if len(nozzle_indices.keys()) > 3 else num_permu
for permu in itertools.permutations(nozzle_indices.keys()):
if (num_permu := num_permu - 1) < 0:
break
nozzle_pattern_list.append([])
for idx in permu:
for _ in range(nozzle_heads[nozzle_indices[idx]]):
@ -93,8 +97,7 @@ def feeder_nozzle_pattern(component_data):
idx += 1
nozzle_points.pop(min_points_nozzle)
# nozzle_pattern_list = []
# nozzle_pattern_list.append(['CN220', 'CN220', 'CN065', 'CN065', 'CN140', 'CN140'])
return nozzle_pattern_list
@ -339,19 +342,20 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
assign_part_stack.pop(0)
assign_part_stack_points.pop(0)
nozzle_change_counter, average_slot = 0, []
nozzle_change_counter = 0
average_slot, average_head = [], []
for head, feeder_ in enumerate(feeder_assign):
if feeder_ < 0:
continue
average_slot.append((feeder_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1)
average_head.append(head)
if nozzle_pattern and component_data.loc[feeder_].nz != nozzle_pattern[head]:
nozzle_change_counter += 1
if len(average_slot) == 0:
continue
average_slot = sum(average_slot) / len(average_slot)
average_slot = sum(average_slot) / len(average_slot) - sum(average_head) / len(average_head) * interval_ratio
assign_value = 0
feeder_assign_points_cpy = feeder_assign_points.copy()
while True:
@ -777,6 +781,8 @@ def feeder_base_scan(component_data, pcb_data, feeder_data):
if cycle_nozzle == nozzle_mode[nozzle_insert_cycle]:
nozzle_mode_cycle[nozzle_insert_cycle] += 1
elif nozzle_insert_cycle + 1 < len(nozzle_mode) and cycle_nozzle == nozzle_mode[nozzle_insert_cycle + 1]:
nozzle_mode_cycle[nozzle_insert_cycle + 1] += 1
else:
nozzle_mode.insert(nozzle_insert_cycle + 1, cycle_nozzle)
nozzle_mode_cycle.insert(nozzle_insert_cycle + 1, 1)

View File

@ -295,10 +295,7 @@ def gurobi_optimizer(pcb_data, component_data, feeder_data, reduction=True, part
mdl.addConstrs(
2 * d[l, h] == quicksum(d_plus[j, h, l] for j in range(J)) + quicksum(d_minus[j, h, l] for j in range(J)) for l
in range(L - 1) for h in range(max_head_index))
mdl.addConstrs(2 * d[L - 1, h] == quicksum(d_plus[j, h, L - 1] for j in range(J)) + quicksum(
d_minus[j, h, L - 1] for j in range(J)) for h in range(max_head_index))
in range(L) for h in range(max_head_index))
mdl.addConstrs(NC[h] == quicksum(d[l, h] for l in range(L)) for h in range(max_head_index))
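For reference, the merged constraint family linearizes the per-head nozzle-change count; the separate case that was previously written for the last cycle group is just the general constraint at l = L-1, so iterating l over range(L) is equivalent:

\[ 2\,d_{l,h} = \sum_{j} d^{+}_{j,h,l} + \sum_{j} d^{-}_{j,h,l} \quad \forall\, l \in \{0,\dots,L-1\},\ \forall\, h, \qquad NC_{h} = \sum_{l} d_{l,h}. \]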

View File

@ -3,7 +3,7 @@ import copy
from base_optimizer.optimizer_common import *
def load_data(filename: str, load_feeder=False, auto_register=True):
def load_data(filename: str, load_feeder=True, auto_register=True):
filename = 'data/' + filename
part_content, step_content = False, False
part_start_line, step_start_line, part_end_line, step_end_line = -1, -1, -1, -1
@ -142,19 +142,20 @@ def load_data(filename: str, load_feeder=False, auto_register=True):
# read the feeder base data
feeder_data = defaultdict(pd.DataFrame)
if load_feeder:
feeder_columns = ['slot', 'part', 'arg']
for machine_index in range(machine_num):
feeder_data[machine_index] = pd.DataFrame(columns=['slot', 'part', 'arg']) # 'arg' marks whether the feeder is pre-assigned, not the assigned quantity
feeder_data[machine_index] = pd.DataFrame(columns=feeder_columns) # 'arg' marks whether the feeder is pre-assigned, not the assigned quantity
for _, data in pcb_data[machine_index].iterrows():
slot, part = data['fdr'].split(' ')
if slot[0] != 'F' and slot[0] != 'R':
continue
slot = int(slot[1:]) if slot[0] == 'F' else int(slot[1:]) + max_slot_index // 2
feeder_data[machine_index] = pd.concat([feeder_data[machine_index], pd.DataFrame([slot, part, 1]).T])
feeder_data[machine_index] = pd.concat([feeder_data[machine_index], pd.DataFrame([slot, part, 1], index=feeder_columns).T], ignore_index=True)
feeder_data[machine_index].drop_duplicates(subset='slot', inplace=True, ignore_index=True)
# randomly remove some of the installed feeders
drop_index = random.sample(list(range(len(feeder_data))), len(feeder_data) // 2)
feeder_data[machine_index].drop(index=drop_index, inplace=True)
# drop_index = random.sample(list(range(len(feeder_data))), len(feeder_data) // 2)
# feeder_data[machine_index].drop(index=drop_index, inplace=True)
feeder_data[machine_index].sort_values(by='slot', ascending=True, inplace=True, ignore_index=True)
@ -178,3 +179,4 @@ def merge_data(partial_pcb_data, partial_component_data):
component_data = component_data[component_data['points'] != 0].reset_index(drop=True)
return pcb_data, component_data

View File

@ -1,10 +1,58 @@
import copy
import random
from generator import *
from base_optimizer.optimizer_interface import *
def exact_assembly_time(pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
placement_result, head_sequence_result = greedy_placement_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence_result)
info = placement_info_evaluation(component_data, pcb_data, opt_res)
# return info.metric()
return info.total_time
def error_info(pred_val, real_val, type='train'):
absolute_error = np.array([])
for idx, (t1, t2) in enumerate(np.nditer([pred_val, real_val])):
absolute_error = np.append(absolute_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
if absolute_error[-1] > 15:
print(f'\033[0;31;31midx: {idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {absolute_error[-1]: .3f}\033[0m')
print('')
print(f'mean absolute prediction error for {type} data : {np.average(absolute_error): .2f}% ')
print(f'maximum absolute prediction error for {type} data : {np.max(absolute_error): .2f}% ')
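# error_info centralizes the per-sample error report that each estimator previously duplicated:
# it prints the mean and maximum absolute percentage error and highlights samples whose gap
# exceeds 15%. A tiny illustration with made-up numbers (not taken from any data set):
# pred = np.array([102.0, 55.0, 40.0])
# real = np.array([100.0, 50.0, 41.0])
# error_info(pred, real, 'test')  # mean approx. 4.8%, max 10.0%; nothing crosses the 15% threshold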
def converter(pcb_data, component_data, assignment):
cp_items = defaultdict(list)
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
_, partial_component_data = convert_line_assigment(None, component_data, assignment)
for machine_index in range(len(assignment)):
cp_item_index = 0
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for _, data in partial_component_data[machine_index].iterrows():
feeder_limit, total_points = data.fdn, data.points
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
div_points = math.floor(total_points / feeder_limit)
if surplus_points:
div_points += 1
surplus_points -= 1
cp_points[cp_item_index], cp_nozzle[cp_item_index] = div_points, data.nz
cp_item_index += 1
cp_items[machine_index] = [cp_points, cp_nozzle, board_width, board_height]
return cp_items
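# converter splits each component's placement points as evenly as possible over its available
# feeders (data.fdn), handing out the remainder one extra point at a time, e.g. 10 points on
# 3 feeders become cp_points entries of 4, 3 and 3. A hedged helper that mirrors just this
# splitting rule (split_points is an illustrative name, not part of the module):
def split_points(total_points, feeder_limit):
    # floor division, plus one extra point for the first (total_points % feeder_limit) feeders
    div_points = [total_points // feeder_limit] * feeder_limit
    for i in range(total_points % feeder_limit):
        div_points[i] += 1
    return div_points  # split_points(10, 3) -> [4, 3, 3]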
class Net(torch.nn.Module):
def __init__(self, input_size, hidden_size=1000, output_size=1):
super(Net, self).__init__()
@ -61,7 +109,10 @@ class NeuralEstimator(Estimator):
self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
self.net_file = 'model/net_model.pth'
if os.path.exists(self.net_file):
self.net.load_state_dict(torch.load(self.net_file))
try:
self.net.load_state_dict(torch.load(self.net_file))
except Exception:
warnings.warn('failed to load the parameters of the neural network model', UserWarning)
def init_weights(self):
for m in self.net.modules():
@ -71,6 +122,7 @@ class NeuralEstimator(Estimator):
def training(self, params):
self.init_weights() # initialize the network parameters
data = data_mgr.loader('opt/' + params.train_file)
x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
y_train = np.array(data[1][::data_mgr.get_update_round()])
@ -97,57 +149,23 @@ class NeuralEstimator(Estimator):
net_predict = self.net(x_train).view(-1)
pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
error_info(pred_time, real_time)
pred_error = np.array([])
for t1, t2 in np.nditer([pred_time, real_time]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
mse = np.linalg.norm((net_predict - y_train.view(-1)).cpu().detach().numpy())
print(f'mean square error for training data result : {mse: 2f} ')
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
torch.save(self.net.state_dict(), self.net_file)
# self.net.load_state_dict(torch.load(self.net_file))
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
self.net.eval()
with torch.no_grad():
pred_time = self.net(x_test).view(-1).cpu().detach().numpy()
# x_test = x_test.cpu().detach().numpy()
over_set = []
pred_idx, pred_error = 0, np.array([])
for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
if pred_error[-1] > 5:
over_set.append(pred_idx + 1)
print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {pred_error[-1]: .3f}\033[0m')
# else:
# print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')
pred_idx += 1
print('over:', over_set)
print('size:', len(over_set))
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')
mse = np.linalg.norm(pred_time - y_test.reshape(-1))
print(f'mean square error for test data result : {mse: 2f} ')
error_info(pred_time, y_test.reshape(-1), 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
assert board_width is not None and board_height is not None
@ -179,13 +197,7 @@ class HeuristicEstimator(Estimator):
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
error_info(y_fit, y_predict)
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
@ -193,13 +205,7 @@ class HeuristicEstimator(Estimator):
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_genetic(cp_points, cp_nozzle)).reshape(1, -1))
@ -288,12 +294,12 @@ class HeuristicEstimator(Estimator):
return [nl, wl, ul]
class RegressionEstimator(Estimator):
class ReconfigEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/params_lr_model.pkl'
self.pickle_file = 'model/reconfig_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
@ -311,13 +317,7 @@ class RegressionEstimator(Estimator):
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
error_info(y_fit, y_predict)
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
@ -325,19 +325,15 @@ class RegressionEstimator(Estimator):
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))
return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))[0, 0]
def heuristic_reconfig(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
if total_point_number == 0:
return [total_point_number, task_block_number]
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
@ -437,13 +433,7 @@ class SVREstimator(Estimator):
input = [[np.average(predict_y[i:i + self.num_folds])] for i in range(len(predict_y) // self.num_folds)]
predict_val = self.svr_list[-1].predict(input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
error_info(data[1], predict_val)
def sos_svr_training(self, x_train, y_train):
population = []
@ -533,14 +523,7 @@ class SVREstimator(Estimator):
input = [[np.average(predict_y[i:i + self.num_folds])] for i in range(len(predict_y) // self.num_folds)]
predict_val = self.svr_list[-1].predict(input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
error_info(data[1], predict_val, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
@ -564,18 +547,29 @@ class SVREstimator(Estimator):
return np.average(pred_error)
def exact_assembly_time(pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
placement_result, head_sequence_result = greedy_placement_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence_result)
# regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
# info.pickup_counter, info.total_points]]
# return self.lr.predict(regression_info)[0, 0]
return info.total_time
class MetricEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/metric_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
x_fit, y_fit = data_mgr.metric('opt/' + params.train_file)
self.lr.fit(x_fit, y_fit)
print(self.lr.coef_)
def testing(self, params):
x_fit, y_fit = data_mgr.metric('opt/' + params.test_file)
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
if __name__ == '__main__':
@ -587,13 +581,13 @@ if __name__ == '__main__':
help='determine whether saving the parameters of network, linear regression model, etc.')
parser.add_argument('--overwrite', default=False, type=bool,
help='determine whether overwriting the training and testing data')
parser.add_argument('--train_file', default='train_data - bp.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data - bp.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=10000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=1000, type=int, help='size of training batch')
parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=8000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=2000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
parser.add_argument('--model', default='neural-network', help='method for assembly time estimation')
parser.add_argument('--machine_optimizer', default='feeder-scan', type=str, help='optimizer for single machine')
params = parser.parse_args()
data_mgr = DataMgr()
@ -611,14 +605,13 @@ if __name__ == '__main__':
# data_mgr.remover() # remove the last saved data
# data_mgr.saver('data/' + file_name, pcb_data) # save new data
info = base_optimizer(1, pcb_data, component_data,
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method='feeder-scan', hinter=True)
info = base_optimizer(1, pcb_data, component_data, pd.DataFrame(columns=['slot', 'part', 'arg']),
params, hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data)
f.close()
estimator = NeuralEstimator()
estimator = MetricEstimator()
estimator.training(params)
estimator.testing(params)

View File

@ -8,10 +8,10 @@ from base_optimizer.optimizer_common import *
class DataMgr:
def __init__(self):
self.min_placement_points = 100
self.max_placement_points = 800
self.min_placement_points = 10
self.max_placement_points = 1000
self.max_component_types = 40
self.max_component_types = 30
self.default_feeder_limit = 1
self.max_nozzle_types = 4
@ -21,7 +21,7 @@ class DataMgr:
self.counter = 0
self.update = 1
self.pre_file = None
self.part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points']
self.part_col = ["part", "fdr", "nz", 'fdn', 'points']
self.component_data = pd.DataFrame(columns=self.part_col) # the component list update for several rounds
def generator(self, mode='Train'):
@ -29,7 +29,7 @@ class DataMgr:
if boundary[0] < boundary[-1]:
boundary[0], boundary[-1] = boundary[-1], boundary[0]
nozzle_type_list = random.sample(['CN065', 'CN220', 'CN040', 'CN140'], self.max_nozzle_types)
nozzle_type_list = random.sample(['CN065', 'CN020', 'CN040', 'CN140'], self.max_nozzle_types)
# determine the nozzle type of component
if self.counter % self.get_update_round() == 0 or mode == 'test':
self.component_data = self.component_data.loc[[]]
@ -38,9 +38,9 @@ class DataMgr:
selected_nozzle = random.sample(nozzle_type_list, total_nozzles)
for cp_idx in range(min(random.randint(1, self.max_component_types), total_points)):
part, nozzle = 'C' + str(cp_idx), random.choice(selected_nozzle)
self.component_data = pd.concat([self.component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', self.default_feeder_limit, 0],
index=self.part_col).T], ignore_index=True)
self.component_data = pd.concat(
[self.component_data, pd.DataFrame([part, 'SM8', nozzle, 1, 0], index=self.part_col).T],
ignore_index=True)
random_fractions = np.random.rand(len(self.component_data))
normalized_fractions = random_fractions / random_fractions.sum()
@ -119,8 +119,8 @@ class DataMgr:
def encode(self, cp_points: defaultdict[str], cp_nozzle: defaultdict[str], board_width, board_height):
assert len(cp_points.keys()) == len(cp_nozzle.keys())
assert len(cp_nozzle.keys()) <= self.max_component_types and len(
set(cp_nozzle.values())) <= self.max_nozzle_types
assert len(set(cp_nozzle.values())) <= self.max_nozzle_types
# === general info ===
total_points = sum(points for points in cp_points.values())
total_component_types, total_nozzle_types = len(cp_points.keys()), len(set(cp_nozzle.values()))
@ -147,20 +147,6 @@ class DataMgr:
data.extend(nozzle_slice)
# === component info ===
# cp_items = [[component, points] for component, points in cp_points.items()]
# cp_items = sorted(cp_items, key=lambda x: (x[1], nz2idx[cp_nozzle[x[0]]] * 0.1 + x[1]), reverse=True)
# for component, points in cp_items:
# nozzle = cp_nozzle[component]
#
# data_slice = [0 for _ in range(self.max_nozzle_types)]
# data_slice[nz2idx[nozzle]] = points
# data.extend(data_slice)
#
# assert self.max_component_types >= total_component_types
# for _ in range(self.max_component_types - total_component_types):
# data.extend([0 for _ in range(self.max_nozzle_types)])
# === component info ===
comp_data_slice = defaultdict(list)
for idx in range(self.max_nozzle_types):
@ -178,7 +164,10 @@ class DataMgr:
data.extend(data_slice)
for idx in range(self.max_nozzle_types):
comp_data_slice[idx].extend([0 for _ in range(self.max_component_types - len(comp_data_slice[idx]))])
if len(comp_data_slice[idx]) <= self.max_component_types:
comp_data_slice[idx].extend([0 for _ in range(self.max_component_types - len(comp_data_slice[idx]))])
else:
comp_data_slice[idx] = comp_data_slice[idx][:self.max_component_types]
data.extend(comp_data_slice[idx])
return data
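# Illustrative helper (assumed name, not from the original file) for the padding /
# truncation rule applied to comp_data_slice above: every per-nozzle slice is forced
# to exactly max_component_types entries so the encoded vector has a fixed length
# for any PCB.
def pad_or_truncate_demo(slice_, width):
    return (slice_ + [0] * max(0, width - len(slice_)))[:width]

assert pad_or_truncate_demo([5, 3], 4) == [5, 3, 0, 0]
assert pad_or_truncate_demo([5, 3, 2, 1, 7], 4) == [5, 3, 2, 1]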
@ -404,17 +393,14 @@ class DataMgr:
ignore_index=True)
return pcb_data, component_data
def loader(self, file_path):
input_data, output_data = [], []  # input: component points, nozzle info, etc.; output: assembly time
# cycle_data, nozzle_change_data, anc_move_data, pickup_data, point_data = [], [], [], [], []
# pick_move_data, place_move_data = [], []
def loader(self, file_path, data_filter=True, hinter=False):
cp_data, point_data, time_data = [], [], []
with open(file_path, 'r') as file:
line = file.readline()
while line:
items = line.split('\t')
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# cp_width, cp_height = defaultdict(float), defaultdict(float)
for cp_idx in range((len(items) - 12) // 3):
points = int(items[14 + cp_idx * 3])
if points == 0:
@ -423,35 +409,59 @@ class DataMgr:
component_type, nozzle_type = items[12 + cp_idx * 3], items[13 + cp_idx * 3]
cp_points[component_type], cp_nozzle[component_type] = points, nozzle_type
# cp_width[component_type], cp_height[component_type] = float(items[15 + cp_idx * 5]), float(
# items[16 + cp_idx * 5])
if len(cp_points.keys()) > 20 or len(cp_points.keys()) < 5:
line = file.readline()
continue
board_width, board_height = float(items[7]), float(items[8])
# cycle_data.append(float(items[1]))
# nozzle_change_data.append(float(items[2]))
# anc_move_data.append(float(items[3]))
# pickup_data.append(float(items[4]))
# pick_move_data.append(float(items[5]))
# place_move_data.append(float(items[6]))
# point_data.append(sum(pt for pt in cp_points.values()))
cycle, nozzle_change_data, pickup_data = float(items[1]), float(items[2]), float(items[4])
point_data.append(sum(int(items[14 + cp_idx * 3]) for cp_idx in range((len(items) - 12) // 3)))
# assembly time data
output_data.append(float(items[0]))
time_data.append(float(items[0]))
cp_data.append([cp_points, cp_nozzle, board_width, board_height])
# train_data.append(self.encode(cp_points, cp_nozzle, float(items[7]), float(items[8])))
input_data.append([cp_points, cp_nozzle, board_width, board_height])
# train_data[-1].extend([cycle_data[-1], nozzle_change_data[-1], anc_move_data[-1], pickup_data[-1]])
line = file.readline()
# return train_data, time_data, cycle_data, nozzle_change_data, anc_move_data, pickup_data, pick_move_data, \
# place_move_data, point_data
if data_filter:
cph_data = [point_data[idx] / time_data[idx] * 3600 for idx in range(len(time_data))]
return [input_data, output_data]
w_quart = 0.6
Q1, Q3 = np.percentile(np.array(cph_data), 25), np.percentile(np.array(cph_data), 75)
indices = [i for i in range(len(cph_data)) if
Q1 - w_quart * (Q3 - Q1) <= cph_data[i] <= Q3 + w_quart * (Q3 - Q1)]
filter_cp_data, filter_time_data = [], []
for idx in indices:
filter_cp_data.append(cp_data[idx])
filter_time_data.append(time_data[idx])
else:
filter_cp_data, filter_time_data = cp_data, time_data
if hinter:
print(
f"# of sample: {len(cp_data)}, outlier : {(1 - len(filter_cp_data) / len(cp_data)) * 100: .2f}%, "
f"mean: {np.average(filter_time_data): .2f}, median: {np.median(filter_time_data): .2f}, "
f"max: {np.max(filter_time_data): .2f}, min: {np.min(filter_time_data): .2f}, "
f"std. dev: {np.std(filter_time_data): .2f}")
return [filter_cp_data, filter_time_data]
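# Minimal standalone sketch of the outlier rejection above (the helper name is an
# assumption): samples whose CPH (placements per hour) fall outside
# [Q1 - 0.6 * IQR, Q3 + 0.6 * IQR] are dropped before the estimator is trained.
import numpy as np

def iqr_filter_indices_demo(point_data, time_data, w_quart=0.6):
    cph = [p / t * 3600 for p, t in zip(point_data, time_data)]
    q1, q3 = np.percentile(cph, 25), np.percentile(cph, 75)
    lo, hi = q1 - w_quart * (q3 - q1), q3 + w_quart * (q3 - q1)
    return [i for i, v in enumerate(cph) if lo <= v <= hi]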
def metric(self, file_path):
metric_data, time_data = [], []
with open(file_path, 'r') as file:
line = file.readline()
while line:
items = line.split('\t')
# cycle, nozzle change, anc move, pick up, pick distance, place distance, point
metric_data.append([float(items[i]) for i in list(range(1, 7))])
metric_data[-1].extend([sum(int(items[14 + cp_idx * 3]) for cp_idx in range((len(items) - 12) // 3))])
# assembly time data
time_data.append(float(items[0]))
line = file.readline()
return [metric_data, time_data]
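# Note: metric() keeps the raw per-board counters (cycle, nozzle change, ANC move,
# pick-up, pick distance, place distance, placement points) together with the
# measured assembly time; this is the feature/target pair consumed by
# MetricEstimator.training().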
def neural_encode(self, input_data):
train_data = []
@ -468,7 +478,7 @@ class DataMgr:
# train_data.append(
# [len(cp_points.keys()), len(cp_nozzle.keys()), sum(cp_points.values()), board_width, board_height])
# return train_data
#
# def get_feature(self):
# return 5
View File
@ -203,39 +203,37 @@ def line_optimizer_genetic(component_data, machine_number):
# population initialization
population = selective_initialization(sorted(cp_points.items(), key=lambda x: x[0]), cp_feeders, population_size,
machine_number)
# calculate fitness value
pop_val = [cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)[0] for individual in
population]
with tqdm(total=n_generations) as pbar:
pbar.set_description('genetic algorithm process for PCB assembly line balance')
new_population = []
for _ in range(n_generations):
# calculate fitness value
pop_val = []
for individual in population:
val, assigned_points = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
pop_val.append(val)
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
val, _ = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
pop_val.append(val)
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
# min-max convert
max_val = max(pop_val)
pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(pop_val) + 1e-10
pop_val = [v / sum_pop_val + 1e-3 for v in pop_val]
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(pop_val)
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(pop_val)
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
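# Hedged sketch of the selection scheme above (the helper below only illustrates the
# idea behind the min-max conversion feeding the roulette_wheel_selection calls):
# fitness values are mirrored around the worst individual so that a lower assembly
# time yields a larger selection probability.
import random

def roulette_selection_demo(pop_val):
    max_val = max(pop_val)
    sel = [max_val - v for v in pop_val]
    total = sum(sel) + 1e-10
    sel = [v / total + 1e-3 for v in sel]
    threshold, acc = random.random() * sum(sel), 0.0
    for idx, weight in enumerate(sel):
        acc += weight
        if acc >= threshold:
            return idx
    return len(sel) - 1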
View File
@ -1,11 +1,3 @@
import os
import pickle
import random
import numpy as np
import pandas as pd
import torch.nn
from base_optimizer.optimizer_interface import *
from generator import *
from estimator import *
@ -22,8 +14,6 @@ class Heuristic:
class LeastPoints(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_points = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
@ -35,8 +25,6 @@ class LeastPoints(Heuristic):
class LeastNzTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nozzle = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
@ -51,11 +39,7 @@ class LeastNzTypes(Heuristic):
class LeastCpTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_types = []
if len(machine_assign) == 1:
return machine_assign[0]
for machine_idx in machine_assign:
machine_types.append(
len(cp_assign[machine_idx]) + 1e-5 * sum(cp_points[cp] for cp in cp_assign[machine_idx]))
@ -65,20 +49,16 @@ class LeastCpTypes(Heuristic):
class LeastCpNzRatio(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nz_type, machine_cp_type = [], []
if len(machine_assign) == 1:
return machine_assign[0]
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nz_type.append(set(cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]))
machine_cp_type.append(len(cp_assign[machine_idx]))
min_idx = np.argmin([(machine_cp_type[idx] + 1e-5 * sum(cp_points[c] for c in cp_assign[idx])) / (
len(machine_nz_type[idx]) + 1e-5) for idx in range(len(machine_assign))])
min_idx = np.argmin([(machine_cp_type[idx] + 1e-5 * sum(
cp_points[c] for c in cp_assign[machine_assign[idx]])) / (len(machine_nz_type[idx]) + 1e-5) for idx in
range(len(machine_assign))])
return machine_assign[min_idx]
@ -104,8 +84,6 @@ def nozzle_assignment(cp_points, cp_nozzle, cp_assign):
class LeastCycle(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_cycle = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
@ -123,8 +101,6 @@ class LeastCycle(Heuristic):
class LeastNzChange(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_nozzle_change = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
@ -144,8 +120,6 @@ class LeastNzChange(Heuristic):
class LeastPickup(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
if len(machine_assign) == 1:
return machine_assign[0]
machine_pick_up = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
@ -208,18 +182,37 @@ def population_initialization(population_size, heuristic_map, cp_points):
def convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, component_list, individual,
machine_number):
component_number = len(cp_feeders.keys())
cp_assign = [[] for _ in range(machine_number)]
machine_all, machine_assign = list(range(machine_number)), defaultdict(set)
component_machine_assign = [[0 for _ in range(machine_number)] for _ in range(component_number)]
machine_assign_counter = [0 for _ in range(machine_number)]
data_mgr = DataMgr()
for idx, div_cp_idx in enumerate(component_list):
h = individual[idx % len(individual)]
cp_idx = cp_index[div_cp_idx]
if len(machine_assign[cp_idx]) < cp_feeders[cp_idx]:
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_all)
machine_assign = []  # indices of machines eligible for this component
if sum(component_machine_assign[cp_idx][:]) < cp_feeders[cp_idx]:
for machine_idx in range(machine_number):
if component_machine_assign[cp_idx][machine_idx] or machine_assign_counter[
machine_idx] < data_mgr.max_component_types:
machine_assign.append(machine_idx)
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_assign)
else:
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, list(machine_assign[cp_idx]))
for machine_idx in range(machine_number):
if component_machine_assign[cp_idx][machine_idx]:
machine_assign.append(machine_idx)
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_assign)
cp_assign[machine_idx].append(div_cp_idx)
machine_assign[cp_idx].add(machine_idx)
if component_machine_assign[cp_idx][machine_idx] == 0:
machine_assign_counter[machine_idx] += 1
component_machine_assign[cp_idx][machine_idx] = 1
return cp_assign
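# Note on the assignment rule above: a divided component may be duplicated on at most
# cp_feeders[cp_idx] machines, and a machine stops accepting new component types once
# machine_assign_counter reaches DataMgr.max_component_types; after the feeder budget
# is used up, the component is restricted to the machines that already carry it.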
@ -292,30 +285,34 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
'g': LeastNzChange,
'u': LeastPickup,
}
division_part = []
for _, data in component_data.iterrows():
division_part.extend([data.points / data.fdn for _ in range(data.fdn)])
division_points = sum(division_part) / len(division_part)
# genetic-based hyper-heuristic
crossover_rate, mutation_rate = 0.6, 0.1
population_size, n_generations = 20, 50
n_iterations = 10
population_size, total_generation = 20, 50
group_size = 10
estimator = NeuralEstimator()
best_val = None
best_heuristic_list = None
best_component_list = None
best_heuristic_list, best_component_list = None, None
cp_feeders, cp_nozzle = defaultdict(int), defaultdict(str)
cp_points, cp_index = defaultdict(int), defaultdict(int)
division_component_data = pd.DataFrame(columns=component_data.columns)
division_points = min(component_data['points'])
idx = 0
for cp_idx, data in component_data.iterrows():
cp_feeders[cp_idx] = data['fdn']
cp_feeders[cp_idx] = data.fdn
division_data = copy.deepcopy(data)
feeder_limit, total_points = division_data.fdn, division_data.points
feeder_limit = max(total_points // division_points * 3, feeder_limit)
if feeder_limit != 1:
feeder_limit = round(min(max(total_points // division_points * 1.5, feeder_limit), total_points))
# feeder_limit = total_points  # enable for small-scale data
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
division_data.fdn, division_data.points = 1, math.floor(total_points / feeder_limit)
@ -331,10 +328,9 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
component_list = [idx for idx, data in division_component_data.iterrows() if data.points > 0]
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
with tqdm(total=n_generations * n_iterations) as pbar:
with tqdm(total=total_generation * group_size) as pbar:
pbar.set_description('hyper-heuristic algorithm process for PCB assembly line balance')
for _ in range(n_iterations):
for _ in range(group_size):
random.shuffle(component_list)
new_population = []
population = population_initialization(population_size, heuristic_map, cp_points)
@ -346,16 +342,17 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
for _ in range(n_generations):
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
for _ in range(total_generation):
population += new_population
for individual in new_population:
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
# min-max convert
max_val = max(pop_val)
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
@ -383,8 +380,6 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
new_population.append(offspring1)
new_population.append(offspring2)
if len(new_population) >= population_size * crossover_rate:
break
pbar.update(1)
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
@ -407,6 +402,8 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
continue
val = max(val,
exact_assembly_time(partial_pcb_data[machine_idx], partial_component_data[machine_idx]))
if best_val is not None and val > best_val:
break
if best_val is None or val < best_val:
best_val = val
@ -415,7 +412,7 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width, board_height,
best_component_list, best_heuristic_list, machine_number, estimator)
print(val)
machine_cp_points = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
best_component_list, best_heuristic_list, machine_number)
@ -423,7 +420,6 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
for machine_idx in range(machine_number):
for idx in machine_cp_points[machine_idx]:
assignment_result[machine_idx][cp_index[idx]] += cp_points[idx]
return assignment_result
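# Illustrative helper (assumed reading, not part of the original file) for the point
# splitting used earlier when building division_component_data: a component's
# placements are divided evenly over its feeder duplicates, with the surplus points
# spread one at a time.
def split_points_demo(total_points, feeder_limit):
    base, surplus = total_points // feeder_limit, total_points % feeder_limit
    return [base + (1 if i < surplus else 0) for i in range(feeder_limit)]

assert split_points_demo(10, 3) == [4, 3, 3]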
View File
@ -1,7 +1,3 @@
import copy
import pandas as pd
from base_optimizer.optimizer_common import *
from base_optimizer.result_analysis import *
@ -10,7 +6,7 @@ def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
mdl = Model('pcb assembly line optimizer')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 600)
# mdl.setParam('TimeLimit', 0.01)
nozzle_type, component_type = [], []
for _, data in component_data.iterrows():
@ -18,11 +14,6 @@ def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
nozzle_type.append(data.nz)
component_type.append(data.part)
average_pos = 0
for _, data in pcb_data.iterrows():
average_pos += data.x
slot_start = int(round(average_pos / len(pcb_data) + stopper_pos[0] - slotf1_pos[0]) / slot_interval) + 1
ratio = 1
J = len(nozzle_type)
N = 10000
@ -31,8 +22,8 @@ def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
H = max_head_index
I = len(component_data)
S = min(len(component_data) * ratio, 60)
K = len(pcb_data)
K = math.ceil(len(pcb_data) * 1.0 / H / M) + 1
# K = 3
CompOfNozzle = [[0 for _ in range(J)] for _ in range(I)] # Compatibility
component_point = [0 for _ in range(I)]
@ -43,85 +34,88 @@ def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
# objective related
g = mdl.addVars(list_range(K), list_range(M), vtype=GRB.BINARY)
d = mdl.addVars(list_range(K - 1), list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
# u = mdl.addVars(list_range(K), list_range(M), vtype=GRB.INTEGER)
d_plus = mdl.addVars(list_range(J), list_range(H), list_range(K - 1), list_range(M), vtype=GRB.CONTINUOUS)
d_minus = mdl.addVars(list_range(J), list_range(H), list_range(K - 1), list_range(M), vtype=GRB.CONTINUOUS)
d = mdl.addVars(list_range(K), list_range(H), list_range(M), lb=0, vtype=GRB.CONTINUOUS)
u = mdl.addVars(list_range(I), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
v = mdl.addVars(list_range(S), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
d_plus = mdl.addVars(list_range(J), list_range(K), list_range(H), list_range(M), lb=0, vtype=GRB.CONTINUOUS)
d_minus = mdl.addVars(list_range(J), list_range(K), list_range(H), list_range(M), lb=0, vtype=GRB.CONTINUOUS)
w = mdl.addVars(list_range(K), list_range(M), vtype=GRB.CONTINUOUS)
e = mdl.addVars(list_range(-(H - 1) * ratio, S), list_range(K), list_range(M), vtype=GRB.BINARY)
f = mdl.addVars(list_range(S), list_range(I), list_range(M), vtype=GRB.BINARY, name='')
x = mdl.addVars(list_range(I), list_range(S), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
n = mdl.addVars(list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
obj = mdl.addVar(lb=0, ub=N, vtype=GRB.CONTINUOUS)
mdl.addConstrs(g[k, m] >= g[k + 1, m] for k in range(K - 1) for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for i in range(I) for s in range(S)) <= g[k, m] for k in range(K) for h in range(H)
for m in range(M))
quicksum(u[i, k, h, m] for i in range(I)) <= g[k, m] for k in range(K) for h in range(H) for m in range(M))
# nozzle no more than 1 for head h and cycle k
mdl.addConstrs(
quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S) for j in range(J)) <= 1 for k
in range(K) for h in range(H) for m in range(M))
# nozzle available number constraint
mdl.addConstrs(
quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S) for h in range(H)) <= H for k
in range(K) for j in range(J) for m in range(M))
mdl.addConstrs(quicksum(CompOfNozzle[i][j] * u[i, k, h, m] for i in range(I) for j in range(J)) <= 1 for k
in range(K) for h in range(H) for m in range(M))
# work completion
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for s in range(S) for k in range(K) for h in range(H) for m in range(M)) ==
component_point[i] for i in range(I))
quicksum(u[i, k, h, m] for k in range(K) for h in range(H) for m in range(M)) == component_point[i] for i in
range(I))
# nozzle change
mdl.addConstrs(quicksum(CompOfNozzle[i][j] * x[i, s, k, h, m] for i in range(I) for s in range(S)) - quicksum(
CompOfNozzle[i][j] * x[i, s, k + 1, h, m] for i in range(I) for s in range(S)) == d_plus[j, h, k, m] - d_minus[
j, h, k, m] for k in range(K - 1) for j in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(quicksum(CompOfNozzle[i][j] * u[i, k, h, m] for i in range(I)) - quicksum(
CompOfNozzle[i][j] * u[i, k + 1, h, m] for i in range(I)) == d_plus[j, k, h, m] - d_minus[j, k, h, m] for k in
range(K - 1) for j in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(2 * d[k, h, m] == quicksum(d_plus[j, h, k, m] for j in range(J)) + quicksum(
d_minus[j, h, k, m] for j in range(J)) for k in range(K - 1) for h in range(H) for m in range(M))
mdl.addConstrs(quicksum(CompOfNozzle[i][j] * u[i, K - 1, h, m] for i in range(I)) - quicksum(
CompOfNozzle[i][j] * u[i, 0, h, m] for i in range(I)) == d_plus[j, K - 1, h, m] - d_minus[j, K - 1, h, m] for j
in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(n[h, m] == quicksum(d[k, h, m] for k in range(K - 1)) - 0.5 for h in range(H) for m in range(M))
mdl.addConstrs(2 * d[k, h, m] == quicksum(d_plus[j, k, h, m] for j in range(J)) + quicksum(
d_minus[j, k, h, m] for j in range(J)) - 1 for k in range(K) for h in range(H) for m in range(M))
# simultaneous pick
for s in range(-(H - 1) * ratio, S):
rng = list(range(max(0, -math.floor(s / ratio)), min(H, math.ceil((S - s) / ratio))))
for k in range(K):
mdl.addConstrs(
quicksum(x[i, s + h * ratio, k, h, m] for h in rng for i in range(I)) <= N * e[s, k, m] for m in
range(M))
quicksum(u[i, k, h, m] * v[s + h * ratio, k, h, m] for h in rng for i in range(I)) <= N * e[s, k, m] for
m in range(M))
mdl.addConstrs(
quicksum(x[i, s + h * ratio, k, h, m] for h in rng for i in range(I)) >= e[s, k, m] for m in range(M))
quicksum(u[i, k, h, m] * v[s + h * ratio, k, h, m] for h in rng for i in range(I)) >= e[s, k, m] for m
in range(M))
# pickup movement
# mdl.addConstrs(u[k, m] >= s1 * e[s1, k, m] - s2 * e[s2, k, m] for s1 in range(-(H - 1) * ratio, S) for s2 in
# range(-(H - 1) * ratio, S) for k in range(K))
# head - feeder slot relationship
mdl.addConstrs(
quicksum(v[s, k, h, m] for s in range(S)) == quicksum(u[i, k, h, m] for i in range(I)) for h in range(H) for k
in range(K) for m in range(M))
# feeder related
mdl.addConstrs(quicksum(f[s, i, m] for s in range(S) for m in range(M)) <= 1 for i in range(I))
mdl.addConstrs(quicksum(f[s, i, m] for s in range(S) for m in range(M)) <= component_data.iloc[i].fdn for i in range(I))
mdl.addConstrs(quicksum(f[s, i, m] for i in range(I)) <= 1 for s in range(S) for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for h in range(H) for k in range(K)) >= f[s, i, m] for i in range(I) for s in range(S)
for m in range(M))
quicksum(u[i, k, h, m] * v[s, k, h, m] for h in range(H) for k in range(K)) >= f[s, i, m] for i in range(I) for
s in range(S) for m in range(M))
mdl.addConstrs(
quicksum(x[i, s, k, h, m] for h in range(H) for k in range(K)) <= N * f[s, i, m] for i in range(I) for s in
range(S) for m in range(M))
mdl.addConstrs(
quicksum(f[s, i, m] for i in range(I)) >= quicksum(f[s + 1, i, m] for i in range(I)) for s in range(S - 1) for m
in range(M))
quicksum(u[i, k, h, m] * v[s, k, h, m] for h in range(H) for k in range(K)) <= N * f[s, i, m] for i in range(I)
for s in range(S) for m in range(M))
# pickup movement
mdl.addConstrs(w[k, m] >= s1 * e[s1, k, m] - s2 * e[s2, k, m] + N * (e[s1, k, m] + e[s2, k, m] - 2) for s1 in
range(-(H - 1) * ratio, S) for s2 in range(-(H - 1) * ratio, S) for k in range(K) for m in range(M))
# objective
T_cy, T_nz, T_pu, T_pl = 2, 3, 1, 1
mdl.addConstrs(obj >= T_cy * quicksum(g[k, m] for k in range(K)) + T_nz * quicksum(
d[k, h, m] for h in range(H) for k in range(K - 1)) + T_pl * quicksum(
e[s, k, m] for s in range(-(H - 1) * ratio, S) for k in range(K)) + T_pl * quicksum(
x[i, s, k, h, m] for i in range(I) for s in range(S) for k in range(K) for h in range(H)) for m in range(M))
mdl.addConstrs(obj >= Fit_cy * quicksum(g[k, m] for k in range(K)) + Fit_nz * 2 * quicksum(
d[k, h, m] for h in range(H) for k in range(K)) + Fit_pu * quicksum(
e[s, k, m] for s in range(-(H - 1) * ratio, S) for k in range(K)) + Fit_pl * quicksum(
u[i, k, h, m] for i in range(I) for k in range(K) for h in range(H)) + Fit_mv * head_interval * quicksum(
w[k, m] for k in range(K)) for m in range(M))
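# Reading of the objective (explanatory note): for every machine m the constraints
# above bound obj from below by Fit_cy * cycles + 2 * Fit_nz * nozzle-change terms
# + Fit_pu * pick-ups + Fit_pl * placements + Fit_mv * head_interval * pick-up travel,
# so minimising obj minimises the largest per-machine estimate, i.e. a makespan proxy
# for the whole line.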
mdl.setObjective(obj, GRB.MINIMIZE)
mdl.optimize()
for m in range(M):
print(f'machine {m} : cycle : {sum(g[k, m].x for k in range(K))}, '
f'nozzle change : {sum(d[k, h, m].x for h in range(H) for k in range(K))}, '
f'pick up : {sum(e[s, k, m].x for s in range(-(H - 1) * ratio, S) for k in range(K))}, '
f'placement : {sum(u[i, k, h, m].x for i in range(I) for k in range(K) for h in range(H))}, '
f'pick movement : {sum(w[k, m].x for k in range(K))}')
pcb_part_indices = defaultdict(list)
for idx, data in pcb_data.iterrows():
@ -131,42 +125,52 @@ def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
for m in range(M):
partial_component_data, partial_pcb_data = copy.deepcopy(component_data), pd.DataFrame(columns=pcb_data.columns)
partial_component_data['points'] = 0
part_index = defaultdict(int)
for idx, data in component_data.iterrows():
part_index[data.part] = idx
component_result, cycle_result, feeder_slot_result = [], [], []
head_place_pos = []
for k in range(K):
if abs(g[k, m].x) < 1e-3:
continue
component_result.append([-1 for _ in range(H)])
cycle_result.append(1)
feeder_slot_result.append([-1 for _ in range(H)])
for h in range(H):
for i in range(I):
for s in range(S):
if abs(x[i, s, k, h, m].x) < 1e-3:
continue
if component_result[-1][h] != -1:
assert 1
component_result[-1][h] = i
feeder_slot_result[-1][h] = slot_start + s * 2
if abs(u[i, k, h, m].x) < 1e-3:
continue
component_result[-1][h] = i
idx = pcb_part_indices[component_data.iloc[i].part][0]
partial_pcb_data = pd.concat([partial_pcb_data, pd.DataFrame(pcb_data.iloc[idx]).T])
head_place_pos.append(pcb_data.iloc[idx].x - h * head_interval)
pcb_part_indices[component_data.iloc[i].part].pop(0)
partial_component_data.loc[i, 'points'] += 1
for s in range(S):
if abs(v[s, k, h, m].x) < 1e-3:
continue
feeder_slot_result[-1][h] = s
average_pos = round(
(sum(head_place_pos) / len(head_place_pos) + stopper_pos[0] - slotf1_pos[0] + 1) / slot_interval)
print(f'average_pos: {average_pos}')
for k in range(len(feeder_slot_result)):
for h in range(H):
if feeder_slot_result[k][h] == -1:
continue
feeder_slot_result[k][h] = feeder_slot_result[k][h] * 2 + average_pos
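# Note (interpretation of the decoding above): average_pos re-anchors the model's
# abstract slot indices to physical feeder slots centred on the mean placement
# x-coordinate, so pick-up travel in the decoded plan stays short; each model slot
# maps onto every other physical slot, hence the factor of 2.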
idx = pcb_part_indices[component_data.iloc[i].part][0]
partial_pcb_data = pd.concat([partial_pcb_data, pd.DataFrame(pcb_data.iloc[idx]).T])
pcb_part_indices[component_data.iloc[i].part].pop(0)
partial_component_data.loc[i, 'points'] += 1
print(component_result)
print(cycle_result)
print(feeder_slot_result)
placement_result, head_sequence = greedy_placement_route_generation(partial_component_data, partial_pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
print('----- Placement machine ' + str(m + 1) + ' ----- ')
info = placement_info_evaluation(partial_component_data, partial_pcb_data, component_result, cycle_result,
feeder_slot_result, placement_result, head_sequence, hinter=False)
optimization_assign_result(partial_component_data, partial_pcb_data, component_result, cycle_result,
feeder_slot_result, nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence)
info = placement_info_evaluation(partial_component_data, partial_pcb_data, opt_res, hinter=False)
optimization_assign_result(partial_component_data, partial_pcb_data, opt_res, nozzle_hinter=True,
component_hinter=True, feeder_hinter=True)
info.print()
assembly_info.append(info)
print('------------------------------ ')
View File
@ -33,7 +33,7 @@ def random_component_assignment(pcb_data, component_data, machine_number, estima
machine_assign = list(range(machine_number))
random.shuffle(machine_assign)
finished_assign_counter = 0
finished_assign_counter = component_points.count(0)
while finished_assign_counter < component_number:
for machine_index in machine_assign:
part = random.randint(0, component_number - 1)
@ -53,16 +53,13 @@ def random_component_assignment(pcb_data, component_data, machine_number, estima
finished_assign_counter += 1
assert sum(component_points) == 0
val = 0
if estimator:
cp_items = estimator.convert(pcb_data, component_data, assignment_result)
for machine_index in range(machine_number):
cp_points, cp_nozzle, cp_width, cp_height, board_width, board_height = cp_items[machine_index]
# objective_value.append(
# estimator.neural_network(cp_points, cp_nozzle, cp_width, cp_height, board_width, board_height))
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
objective_value = 0
cp_items = converter(pcb_data, component_data, assignment_result)
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
objective_value = max(objective_value, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
return val, assignment_result
return objective_value, assignment_result
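# Recurring evaluation pattern (sketch; converter() and the estimator interface are
# the ones used above): the line-level objective is the bottleneck machine, i.e. the
# maximum of the per-machine predicted assembly times.
def line_objective_demo(cp_items, estimator):
    val = 0
    for cp_points, cp_nozzle, board_width, board_height in cp_items:
        val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
    return val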
def greedy_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight):
@ -75,6 +72,7 @@ def local_search_component_assignment(pcb_data, component_data, machine_number,
component_number = len(component_data)
iteration_counter, unsuccessful_iteration_counter = 5000, 50
optimal_val, optimal_assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
for _ in range(iteration_counter):
machine_idx = random.randint(0, machine_number - 1)
if sum(optimal_assignment[machine_idx]) == 0:
@ -90,7 +88,8 @@ def local_search_component_assignment(pcb_data, component_data, machine_number,
assignment = copy.deepcopy(optimal_assignment)
cyclic_counter = 0
swap_machine_idx = None
while cyclic_counter <= 2 * machine_idx:
swap_available = False
while cyclic_counter <= 2 * machine_number:
cyclic_counter += 1
swap_machine_idx = random.randint(0, machine_number - 1)
feeder_available = 0
@ -99,16 +98,18 @@ def local_search_component_assignment(pcb_data, component_data, machine_number,
feeder_available += 1
if feeder_available <= component_data.iloc[part_idx].fdn and swap_machine_idx != machine_idx:
swap_available = True
break
assert swap_machine_idx is not None
assignment[machine_idx][part_idx] -= r
assignment[swap_machine_idx][part_idx] += r
if swap_available:
assignment[machine_idx][part_idx] -= r
assignment[swap_machine_idx][part_idx] += r
val = 0
cp_items = estimator.convert(pcb_data, component_data, assignment)
cp_items = converter(pcb_data, component_data, assignment)
for machine_index in range(machine_number):
cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
if val < optimal_val:
optimal_assignment, optimal_val = assignment, val
@ -183,12 +184,6 @@ def reconfig_crossover_operation(component_data, parent1, parent2, machine_numbe
offspring[machine_index][part_index] += 1
additional_points -= 1
# === result validation ===
for offspring in [offspring1, offspring2]:
for part in range(component_number):
pt = sum(offspring[mt][part] for mt in range(machine_number))
assert pt == component_data.iloc[part]['points']
return offspring1, offspring2
@ -205,18 +200,23 @@ def reconfig_mutation_operation(component_data, parent, machine_number):
for component_index, points in enumerate(offspring[swap_machine1]):
if points:
component_list.append(component_index)
if len(component_list) == 0:
return offspring
swap_component_index = random.sample(component_list, 1)[0]
swap_points = random.randint(1, offspring[swap_machine1][swap_component_index])
feeder_counter = 0
for machine_index in range(machine_number):
if offspring[swap_machine1][swap_component_index] < swap_points or machine_index == swap_machine2:
feeder_counter += 1
if feeder_counter > component_data.iloc[swap_component_index].fdn:
return offspring
offspring[swap_machine1][swap_component_index] -= swap_points
offspring[swap_machine2][swap_component_index] += swap_points
feeder_counter = 0
for machine_index in range(machine_number):
if offspring[machine_index][swap_component_index]:
feeder_counter += 1
if feeder_counter > component_data.iloc[swap_component_index].fdn:
return parent
return offspring
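# Note on the guard above: after moving swap_points between the two machines, the
# number of machines still holding the component is re-counted; if it would exceed
# the component's feeder limit (fdn), the mutation is rejected and the unmodified
# parent is returned instead.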
@ -229,38 +229,35 @@ def evolutionary_component_assignment(pcb_data, component_data, machine_number,
generation_number = 100
mutation_rate, crossover_rate = 0.1, 0.8
population = []
population, pop_val = [], []
for _ in range(population_size):
population.append(random_component_assignment(pcb_data, component_data, machine_number, None)[1])
population.append(random_component_assignment(pcb_data, component_data, machine_number, estimator)[1])
cp_items = converter(pcb_data, component_data, population[-1])
val = 0
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
pop_val.append(val)
with tqdm(total=generation_number) as pbar:
pbar.set_description('evolutionary algorithm process for PCB assembly line balance')
new_population = []
for _ in range(generation_number):
population += new_population
# calculate fitness value
pop_val = []
for individual in population:
for individual in new_population:
val = 0
cp_items = estimator.convert(pcb_data, component_data, individual)
cp_items = converter(pcb_data, component_data, individual)
for machine_index in range(machine_number):
cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
pop_val.append(val)
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
cp_items = estimator.convert(pcb_data, component_data, individual)
val = 0
for machine_index in range(machine_index):
cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
pop_val.append(val)
# min-max convert
max_val = max(pop_val)
pop_val_sel = list(map(lambda v: max_val - v, pop_val))
@ -298,11 +295,12 @@ def evolutionary_component_assignment(pcb_data, component_data, machine_number,
def line_optimizer_reconfiguration(component_data, pcb_data, machine_number):
# === assignment of heads to modules is omitted ===
optimal_assignment, optimal_val = [], None
estimator = RegressionEstimator() # element from list [0, 1, 2, 5, 10] task_block ~= cycle
estimator = ReconfigEstimator() # element from list [0, 1, 2, 5, 10] task_block ~= cycle
# === assignment of components to heads
for i in range(5):
if i == 0:
# random
print('random component allocation algorithm process for PCB assembly line balance')
val, assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
elif i == 1:
# brute force
@ -310,6 +308,7 @@ def line_optimizer_reconfiguration(component_data, pcb_data, machine_number):
continue
elif i == 2:
# local search
print('local search component allocation algorithm process for PCB assembly line balance')
val, assignment = local_search_component_assignment(pcb_data, component_data, machine_number, estimator)
elif i == 3:
# evolutionary
View File
@ -1,10 +0,0 @@
# implementation of
# <<Hybrid spider monkey optimisation algorithm for multi-level planning and scheduling problems of assembly lines>>
def assemblyline_optimizer_spidermonkey(pcb_data, component_data):
# number of swarms: 10
# maximum number of groups: 5
# number of loops: 100
# food source population: 50
# mutation rate: 0.1
# crossover rate: 0.9
# computation time(s): 200
pass
View File
@ -1,6 +1,4 @@
import random
import numpy as np
import time
from dataloader import *
from lineopt_genetic import line_optimizer_genetic
@ -12,32 +10,32 @@ from lineopt_model import line_optimizer_model
from base_optimizer.optimizer_interface import *
def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number):
assembly_info = []
if line_optimizer == 'hyper-heuristic' or line_optimizer == 'heuristic' or line_optimizer == 'genetic' or \
line_optimizer == 'reconfiguration':
if machine_number > 1:
if line_optimizer == 'hyper-heuristic':
assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, machine_number)
elif line_optimizer == "heuristic":
assignment_result = line_optimizer_heuristic(component_data, machine_number)
elif line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, machine_number)
else:
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, machine_number)
else:
assignment_result = [[]]
for _, data in component_data.iterrows():
assignment_result[-1].append(data.points)
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
def optimizer(pcb_data, component_data, params):
if params.machine_number == 1:
assembly_info = [
base_optimizer(1, pcb_data, component_data, pd.DataFrame(columns=['slot', 'part', 'arg']), params,
hinter=True)]
return assembly_info
for machine_index in range(machine_number):
assembly_info.append(
base_optimizer(machine_index + 1, partial_pcb_data[machine_index], partial_component_data[machine_index],
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method=machine_optimizer,
hinter=True))
elif line_optimizer == 'model':
assembly_info = line_optimizer_model(component_data, pcb_data, machine_number)
if params.line_optimizer == 'hyper-heuristic' or params.line_optimizer == 'heuristic' or params.line_optimizer \
== 'genetic' or params.line_optimizer == 'reconfiguration':
if params.line_optimizer == 'hyper-heuristic':
assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, params.machine_number)
elif params.line_optimizer == "heuristic":
assignment_result = line_optimizer_heuristic(component_data, params.machine_number)
elif params.line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, params.machine_number)
else:
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, params.machine_number)
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
assembly_info = []
for machine_index in range(params.machine_number):
assembly_info.append(base_optimizer(machine_index + 1, partial_pcb_data[machine_index],
partial_component_data[machine_index],
pd.DataFrame(columns=['slot', 'part', 'arg']), params, hinter=True))
elif params.line_optimizer == 'model':
assembly_info = line_optimizer_model(component_data, pcb_data, params.machine_number)
else:
raise ValueError('the specified line optimizer method does not exist')
@ -50,71 +48,151 @@ def main():
# parse command-line arguments
parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
parser.add_argument('--mode', default=1, type=int, help='mode: 0 -directly load pcb data without optimization '
'for data analysis, 1 -optimize pcb data')
parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
'for data analysis, 1 -optimize pcb data, 2 -batch test')
parser.add_argument('--filename', default='L01/KAN3-Z2.txt', type=str, help='load pcb data')
parser.add_argument('--comp_register', default=1, type=int, help='register the components according to the pcb data')
parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line')
parser.add_argument('--machine_number', default=2, type=int, help='the number of machine in the assembly line')
parser.add_argument('--machine_optimizer', default='feeder-scan', type=str, help='optimizer for single machine')
parser.add_argument('--line_optimizer', default='hyper-heuristic', type=str, help='optimizer for PCB assembly line')
# parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for PCB assembly line')
# parser.add_argument('--line_optimizer', default='model', type=str, help='optimizer for PCB assembly line')
parser.add_argument('--feeder_limit', default=1, type=int, help='the upper feeder limit for each type of component')
parser.add_argument('--save', default=1, type=int, help='save the optimization result')
parser.add_argument('--save_suffix', default='(10)', type=str, help='suffix appended to the saved optimization result files')
params = parser.parse_args()
# show all rows and columns in the printed results
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
if params.mode == 0:
partial_pcb_data, partial_component_data, _ = load_data(params.filename)
partial_pcb_data, partial_component_data, _ = load_data(params.filename, load_feeder=False)
assembly_info = []
for machine_index in range(len(partial_pcb_data)):
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = \
convert_pcbdata_to_result(partial_pcb_data[machine_index], partial_component_data[machine_index])
print('----- Placement machine ' + str(machine_index) + ' ----- ')
opt_res = convert_pcbdata_to_result(partial_pcb_data[machine_index], partial_component_data[machine_index])
info = placement_info_evaluation(partial_component_data[machine_index], partial_pcb_data[machine_index],
component_result, cycle_result, feeder_slot_result, placement_result,
head_sequence)
opt_res)
assembly_info.append(info)
optimization_assign_result(partial_component_data[machine_index], partial_pcb_data[machine_index],
component_result, cycle_result, feeder_slot_result, nozzle_hinter=True,
component_hinter=True, feeder_hinter=True)
optimization_assign_result(partial_component_data[machine_index], partial_pcb_data[machine_index], opt_res,
nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
info.print()
if params.save:
output_optimize_result(f'result/{params.filename[:-4]}-T-Solution-M0{machine_index + 1}',
partial_component_data[machine_index], partial_pcb_data[machine_index], opt_res)
print('------------------------------ ')
else:
for machine_idx, info in enumerate(assembly_info):
print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
elif params.mode == 1:
sys.stdout = open(f'record/{params.filename[:-4]}-{params.line_optimizer}.txt', 'w')
# load PCB data
partial_pcb_data, partial_component_data, _ = load_data(params.filename)
pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)
start_time = time.time()
assembly_info = optimizer(pcb_data, component_data, params)
sys.stdout = sys.__stdout__
print(f'optimizer running time: {time.time() - start_time: .3f}')
for machine_idx, info in enumerate(assembly_info):
print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
assembly_info = optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer,
params.machine_number)
print(f'assembly metric evaluation {max(info.metric() for info in assembly_info)}')
# index_list, part_list = [5, 6, 7, 8, 9, 10, 11, 12, 13], []
# for idx in index_list:
# part_list.append(component_data.iloc[idx].part)
# pcb_data = pcb_data[pcb_data['part'].isin(part_list)].reset_index(drop=True)
# component_data = component_data.iloc[index_list].reset_index(drop=True)
#
# from lineopt_hyperheuristic import DataMgr, Net
# data_mgr = DataMgr()
#
# cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# for _, data in component_data.iterrows():
# cp_points[data.part], cp_nozzle[data.part] = data.points, data.nz
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
#
# net.load_state_dict(torch.load('model/net_model.pth'))
# board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
# encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
# encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# print(f'net pred time: {net(encoding)[0, 0].item():.3f}')
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
else:
# line_optimizer = ['T-Solution', 'hyper-heuristic', 'genetic', 'reconfiguration']
line_optimizer = ['genetic']
file_dirs = ['L01', 'L02', 'L03']
for machine_idx, info in enumerate(assembly_info):
print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
running_round = 10
line_opt_result, line_opt_runtime = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
opt_columns = []
for line_opt in line_optimizer:
if line_opt == 'T-Solution':
opt_columns.append(line_opt)
else:
opt_columns.extend([line_opt + str(idx + 1) for idx in range(running_round)])
for file_dir in file_dirs:
line_opt_result[file_dir] = pd.DataFrame(columns=opt_columns)
line_opt_runtime[file_dir] = pd.DataFrame(columns=opt_columns)
line_opt_result[file_dir].index.name, line_opt_runtime[file_dir].index.name = 'file', 'file'
for file_index, file in enumerate(os.listdir('data/' + file_dir)):
sys.stdout = sys.__stdout__
print(f'--- {file_dir} : ({file_index + 1}) file {file} --- ')
try:
partial_pcb_data, partial_component_data, _ = load_data(file_dir + '/' + file, load_feeder=False)
except:
traceback.print_exc()
warning_info = f'file: {file_dir}/{file}: an unexpected error occurred while loading the data'
warnings.warn(warning_info, SyntaxWarning)
continue
machine_number = len(partial_pcb_data)
if not os.path.exists(f'record/{file_dir}'):
os.makedirs(f'record/{file_dir}')
merge_pcb_data, merge_component_data = merge_data(partial_pcb_data, partial_component_data)
for line_opt in line_optimizer:
assembly_info = []
if line_opt == 'T-Solution':
sys.stdout = open(f'record/{file_dir}/{file[:-4]}-{line_opt}.txt', 'w')
for machine_index in range(machine_number):
opt_res = convert_pcbdata_to_result(partial_pcb_data[machine_index],
partial_component_data[machine_index])
print('----- Placement machine ' + str(machine_index + 1) + ' ----- ')
info = placement_info_evaluation(partial_component_data[machine_index],
partial_pcb_data[machine_index], opt_res, hinter=True)
print('------------------------------ ')
assembly_info.append(info)
if params.save:
output_optimize_result(f'result/{file_dir}/{file[:-4]}-T-Solution-M0{machine_index + 1}',
partial_component_data[machine_index],
partial_pcb_data[machine_index], opt_res)
line_opt_result[file_dir].loc[file, line_opt] = max(info.total_time for info in assembly_info)
for machine_idx, info in enumerate(assembly_info):
print(
f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
else:
for round_idx in range(running_round):
sys.stdout = open(f'record/{file_dir}/{file[:-4]}-{line_opt} ({round_idx + 1}).txt', 'w')
params = parser.parse_args(
['--filename', file_dir + '/' + file, '--machine_number', str(machine_number),
'--line_optimizer', line_opt, '--save_suffix', f'({round_idx + 1})'])
start_time = time.time()
assembly_info = optimizer(merge_pcb_data, merge_component_data, params)
line_opt_result[file_dir].loc[file, line_opt + str(round_idx + 1)] = max(
info.total_time for info in assembly_info)
line_opt_runtime[file_dir].loc[file, line_opt + str(round_idx + 1)] = time.time() - start_time
for machine_idx, info in enumerate(assembly_info):
print(
f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, '
f'total placement: {info.total_points}, '
f'total component types {info.total_components: d}')
print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
with pd.ExcelWriter('result/line_optimizer.xlsx', engine='openpyxl') as writer:
for file_dir, result in line_opt_result.items():
result.to_excel(writer, sheet_name='result-' + file_dir, float_format='%.3f', na_rep='')
for file_dir, running_time in line_opt_runtime.items():
running_time.to_excel(writer, sheet_name='running_time-' + file_dir, float_format='%.3f', na_rep='')
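# Note: batch mode writes one 'result-<dir>' sheet and one 'running_time-<dir>' sheet
# per data directory, with rows indexed by PCB file and one column per optimizer
# round, so repeated runs of the stochastic optimizers can be compared side by side.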
if __name__ == '__main__':